diff --git a/.baseimage-release b/.baseimage-release new file mode 100644 index 00000000000..6e8bf73aa55 --- /dev/null +++ b/.baseimage-release @@ -0,0 +1 @@ +0.1.0 diff --git a/.gitignore b/.gitignore index 23147526e14..7a9e3f87b43 100755 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,8 @@ go-carpet-coverage* # make node-sdk copied files sdk/node/lib/protos/* report.xml +.settings +.project +.gradle +build/ +bin/ diff --git a/Makefile b/Makefile index dd9dbce37d2..44a8838f215 100644 --- a/Makefile +++ b/Makefile @@ -36,11 +36,27 @@ # - clean - cleans the build area # - dist-clean - superset of 'clean' that also removes persistent state -PROJECT_NAME=hyperledger/fabric +PROJECT_NAME = hyperledger/fabric +BASE_VERSION = 0.7.0 +IS_RELEASE = false + +ifneq ($(IS_RELEASE),true) +EXTRA_VERSION ?= snapshot-$(shell git rev-parse --short HEAD) +PROJECT_VERSION=$(BASE_VERSION)-$(EXTRA_VERSION) +else +PROJECT_VERSION=$(BASE_VERSION) +endif + PKGNAME = github.com/$(PROJECT_NAME) +GO_LDFLAGS = -X github.com/hyperledger/fabric/metadata.Version=$(PROJECT_VERSION) CGO_FLAGS = CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" UID = $(shell id -u) -CHAINTOOL_RELEASE=v0.8.1 +ARCH=$(shell uname -m) +CHAINTOOL_RELEASE=v0.9.1 +BASEIMAGE_RELEASE=$(shell cat ./.baseimage-release) + +DOCKER_TAG=$(ARCH)-$(PROJECT_VERSION) +BASE_DOCKER_TAG=$(ARCH)-$(BASEIMAGE_RELEASE) EXECUTABLES = go docker git curl K := $(foreach exec,$(EXECUTABLES),\ @@ -50,13 +66,10 @@ K := $(foreach exec,$(EXECUTABLES),\ SUBDIRS = gotools sdk/node SUBDIRS:=$(strip $(SUBDIRS)) -# Make our baseimage depend on any changes to images/base or scripts/provision -BASEIMAGE_RELEASE = $(shell cat ./images/base/release) -BASEIMAGE_DEPS = $(shell git ls-files images/base scripts/provision) - +GOSHIM_DEPS = $(shell ./scripts/goListFiles.sh github.com/hyperledger/fabric/core/chaincode/shim | sort | uniq) JAVASHIM_DEPS = $(shell git ls-files core/chaincode/shim/java) PROJECT_FILES = $(shell git ls-files) -IMAGES = base src ccenv peer membersrvc javaenv orderer +IMAGES = src ccenv peer membersrvc javaenv orderer all: peer membersrvc checks @@ -80,12 +93,19 @@ membersrvc: build/bin/membersrvc membersrvc-image: build/image/membersrvc/.dummy unit-test: peer-image gotools - @./scripts/goUnitTests.sh + @./scripts/goUnitTests.sh $(DOCKER_TAG) "$(GO_LDFLAGS)" + +node-sdk: sdk/node + +node-sdk-unit-tests: peer membersrvc + cd sdk/node && $(MAKE) unit-tests + +unit-tests: unit-test node-sdk-unit-tests .PHONY: images images: $(patsubst %,build/image/%/.dummy, $(IMAGES)) -behave-deps: images peer +behave-deps: images peer build/bin/block-listener behave: behave-deps @echo "Running behave tests" @cd bddtests; behave $(BEHAVE_OPTS) @@ -108,14 +128,14 @@ linter: gotools # we may later inject the binary into a different docker environment # This is necessary since we cannot guarantee that binaries built # on the host natively will be compatible with the docker env. 
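Note on the GO_LDFLAGS line above: the linker's -X flag overwrites a package-level string variable at link time, which is how PROJECT_VERSION (BASE_VERSION plus the git short hash for snapshot builds) ends up inside the peer and membersrvc binaries without being hard-coded. A minimal stand-alone sketch of the pattern, using a hypothetical main.Version variable rather than the real github.com/hyperledger/fabric/metadata package targeted by the Makefile:

package main

import "fmt"

// Version is a placeholder default; a versioned build overrides it at link time, e.g.:
//   go build -ldflags "-X main.Version=0.7.0-snapshot-abc1234"
var Version = "development build"

func main() {
	fmt.Println("fabric version:", Version)
}

The Makefile does the same thing through go install -ldflags "$(GO_LDFLAGS)", only the variable lives in the metadata package instead of main.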
-%/bin/protoc-gen-go: build/image/base/.dummy Makefile +%/bin/protoc-gen-go: Makefile @echo "Building $@" @mkdir -p $(@D) @docker run -i \ --user=$(UID) \ -v $(abspath vendor/github.com/golang/protobuf):/opt/gopath/src/github.com/golang/protobuf \ -v $(abspath $(@D)):/opt/gopath/bin \ - hyperledger/fabric-baseimage go install github.com/golang/protobuf/protoc-gen-go + hyperledger/fabric-baseimage:$(BASE_DOCKER_TAG) go install github.com/golang/protobuf/protoc-gen-go build/bin/chaintool: Makefile @echo "Installing chaintool" @@ -127,6 +147,11 @@ build/bin/chaintool: Makefile @mkdir -p $(@D) @cp $^ $@ +# JIRA FAB-243 - Mark build/docker/bin artifacts explicitly as secondary +# since they are never referred to directly. This prevents +# the makefile from deleting them inadvertently. +.SECONDARY: build/docker/bin/peer build/docker/bin/membersrvc + # We (re)build a package within a docker context but persist the $GOPATH/pkg # directory so that subsequent builds are faster build/docker/bin/%: build/image/src/.dummy $(PROJECT_FILES) @@ -137,7 +162,8 @@ build/docker/bin/%: build/image/src/.dummy $(PROJECT_FILES) --user=$(UID) \ -v $(abspath build/docker/bin):/opt/gopath/bin \ -v $(abspath build/docker/pkg):/opt/gopath/pkg \ - hyperledger/fabric-src go install github.com/hyperledger/fabric/$(TARGET) + hyperledger/fabric-src:$(DOCKER_TAG) go install -ldflags "$(GO_LDFLAGS)" github.com/hyperledger/fabric/$(TARGET) + @touch $@ build/bin: mkdir -p $@ @@ -145,50 +171,60 @@ build/bin: # Both peer and peer-image depend on ccenv-image and javaenv-image (all docker env images it supports) build/bin/peer: build/image/ccenv/.dummy build/image/javaenv/.dummy build/image/peer/.dummy: build/image/ccenv/.dummy build/image/javaenv/.dummy -build/image/peer/.dummy: build/docker/bin/examples/events/block-listener/ -build/bin/%: build/image/base/.dummy $(PROJECT_FILES) +build/bin/block-listener: @mkdir -p $(@D) - @echo "$@" - $(CGO_FLAGS) GOBIN=$(abspath $(@D)) go install $(PKGNAME)/$(@F) + $(CGO_FLAGS) GOBIN=$(abspath $(@D)) go install $(PKGNAME)/examples/events/block-listener @echo "Binary available as $@" @touch $@ -# Special override for base-image. 
-build/image/base/.dummy: $(BASEIMAGE_DEPS) - @echo "Building docker base-image" +build/bin/%: $(PROJECT_FILES) @mkdir -p $(@D) - @./scripts/provision/docker.sh $(BASEIMAGE_RELEASE) + @echo "$@" + $(CGO_FLAGS) GOBIN=$(abspath $(@D)) go install -ldflags "$(GO_LDFLAGS)" $(PKGNAME)/$(@F) + @echo "Binary available as $@" @touch $@ # Special override for src-image -build/image/src/.dummy: build/image/base/.dummy $(PROJECT_FILES) +build/image/src/.dummy: $(PROJECT_FILES) @echo "Building docker src-image" @mkdir -p $(@D) - @cat images/src/Dockerfile.in > $(@D)/Dockerfile + @cat images/src/Dockerfile.in \ + | sed -e 's/_BASE_TAG_/$(BASE_DOCKER_TAG)/g' \ + | sed -e 's/_TAG_/$(DOCKER_TAG)/g' \ + > $(@D)/Dockerfile @git ls-files | tar -jcT - > $(@D)/gopath.tar.bz2 - docker build -t $(PROJECT_NAME)-src:latest $(@D) + docker build -t $(PROJECT_NAME)-src $(@D) + docker tag $(PROJECT_NAME)-src $(PROJECT_NAME)-src:$(DOCKER_TAG) @touch $@ # Special override for ccenv-image (chaincode-environment) -build/image/ccenv/.dummy: build/image/src/.dummy build/image/ccenv/bin/protoc-gen-go build/image/ccenv/bin/chaintool Makefile +build/image/ccenv/.dummy: build/image/ccenv/bin/protoc-gen-go build/image/ccenv/bin/chaintool build/image/ccenv/goshim.tar.bz2 Makefile @echo "Building docker ccenv-image" - @cat images/ccenv/Dockerfile.in > $(@D)/Dockerfile - docker build -t $(PROJECT_NAME)-ccenv:latest $(@D) + @cat images/ccenv/Dockerfile.in \ + | sed -e 's/_BASE_TAG_/$(BASE_DOCKER_TAG)/g' \ + | sed -e 's/_TAG_/$(DOCKER_TAG)/g' \ + > $(@D)/Dockerfile + docker build -t $(PROJECT_NAME)-ccenv $(@D) + docker tag $(PROJECT_NAME)-ccenv $(PROJECT_NAME)-ccenv:$(DOCKER_TAG) @touch $@ -# Special override for java-image +# Special override for java-image build/image/javaenv/.dummy: Makefile $(JAVASHIM_DEPS) @echo "Building docker javaenv-image" @mkdir -p $(@D) - @cat images/javaenv/Dockerfile.in > $(@D)/Dockerfile + @cat images/javaenv/Dockerfile.in \ + | sed -e 's/_BASE_TAG_/$(BASE_DOCKER_TAG)/g' \ + | sed -e 's/_TAG_/$(DOCKER_TAG)/g' \ + > $(@D)/Dockerfile # Following items are packed and sent to docker context while building image # 1. Java shim layer source code # 2. Proto files used to generate java classes # 3. 
Gradle settings file @git ls-files core/chaincode/shim/java | tar -jcT - > $(@D)/javashimsrc.tar.bz2 @git ls-files protos settings.gradle | tar -jcT - > $(@D)/protos.tar.bz2 - docker build -t $(PROJECT_NAME)-javaenv:latest $(@D) + docker build -t $(PROJECT_NAME)-javaenv $(@D) + docker tag $(PROJECT_NAME)-javaenv $(PROJECT_NAME)-javaenv:$(DOCKER_TAG) @touch $@ # Default rule for image creation @@ -196,31 +232,32 @@ build/image/%/.dummy: build/image/src/.dummy build/docker/bin/% $(eval TARGET = ${patsubst build/image/%/.dummy,%,${@}}) @echo "Building docker $(TARGET)-image" @mkdir -p $(@D)/bin - @cat images/app/Dockerfile.in | sed -e 's/_TARGET_/$(TARGET)/g' > $(@D)/Dockerfile + @cat images/app/Dockerfile.in \ + | sed -e 's/_BASE_TAG_/$(BASE_DOCKER_TAG)/g' \ + | sed -e 's/_TAG_/$(DOCKER_TAG)/g' \ + > $(@D)/Dockerfile cp build/docker/bin/$(TARGET) $(@D)/bin - docker build -t $(PROJECT_NAME)-$(TARGET):latest $(@D) + docker build -t $(PROJECT_NAME)-$(TARGET) $(@D) + docker tag $(PROJECT_NAME)-$(TARGET) $(PROJECT_NAME)-$(TARGET):$(DOCKER_TAG) @touch $@ +%/goshim.tar.bz2: $(GOSHIM_DEPS) Makefile + @echo "Creating $@" + @tar -jhc -C $(GOPATH)/src $(patsubst $(GOPATH)/src/%,%,$(GOSHIM_DEPS)) > $@ + .PHONY: protos protos: gotools ./devenv/compile_protos.sh -base-image-clean: - -docker rmi -f $(PROJECT_NAME)-baseimage - -@rm -rf build/image/base ||: +src-image-clean: ccenv-image-clean peer-image-clean membersrvc-image-clean %-image-clean: $(eval TARGET = ${patsubst %-image-clean,%,${@}}) - -docker rmi -f $(PROJECT_NAME)-$(TARGET) + -docker images -q $(PROJECT_NAME)-$(TARGET) | xargs -r docker rmi -f -@rm -rf build/image/$(TARGET) ||: images-clean: $(patsubst %,%-image-clean, $(IMAGES)) -node-sdk: sdk/node - -node-sdk-unit-tests: peer membersrvc - cd sdk/node && $(MAKE) unit-tests - .PHONY: $(SUBDIRS:=-clean) $(SUBDIRS:=-clean): cd $(patsubst %-clean,%,$@) && $(MAKE) clean diff --git a/README.md b/README.md new file mode 100644 index 00000000000..783901089a9 --- /dev/null +++ b/README.md @@ -0,0 +1,51 @@ + +**Note:** This is a **read-only mirror** of the formal [Gerrit](https://gerrit.hyperledger.org/r/#/admin/projects/fabric) repository, +where active development is ongoing. Issue tracking is handled in [Jira](https://jira.hyperledger.org/secure/RapidBoard.jspa?projectKey=FAB&rapidView=5&view=planning) + +## Incubation Notice + +This project is a Hyperledger project in _Incubation_. It was proposed to the +community and documented [here](https://goo.gl/RYQZ5N). Information on what +_Incubation_ entails can be found in the [Hyperledger Project Lifecycle +document](https://goo.gl/4edNRc). + +[![Build Status](https://jenkins.hyperledger.org/buildStatus/icon?job=fabric-merge-x86_64)](https://jenkins.hyperledger.org/view/fabric/job/fabric-merge-x86_64/) +[![Go Report Card](https://goreportcard.com/badge/github.com/hyperledger/fabric)](https://goreportcard.com/report/github.com/hyperledger/fabric) +[![GoDoc](https://godoc.org/github.com/hyperledger/fabric?status.svg)](https://godoc.org/github.com/hyperledger/fabric) +[![Documentation Status](https://readthedocs.org/projects/hyperledger-fabric/badge/?version=latest)](http://hyperledger-fabric.readthedocs.io/en/latest/?badge=latest) + +## Hyperledger fabric + +The fabric is an implementation of blockchain technology, leveraging familiar +and proven technologies. It is a modular architecture allowing pluggable +implementations of various function. It features powerful container technology +to host any mainstream language for smart contracts development. 
+ +## Documentation, Getting Started and Developer Guides + +This is a **read-only mirror** of the formal Gerrit repository, please visit our +[online documentation](http://hyperledger-fabric.readthedocs.io/en/latest/) for +information on getting started using and developing with the fabric, SDK and chaincode. + +## Contributing + +We welcome contributions to the Hyperledger Project in many forms. There’s always plenty to do! +Full details of how to contribute to this project are documented [here](http://hyperledger-fabric.readthedocs.io/en/latest/CONTRIBUTING/). + +## Community + +[Hyperledger Community](https://www.hyperledger.org/community) + +[Hyperledger mailing lists and archives](http://lists.hyperledger.org/) + +[Hyperledger Slack](http://hyperledgerproject.slack.com) - if you need an invitation, try our [Slack inviter](https://slack.hyperledger.org) + +[Hyperledger Wiki](https://github.com/hyperledger/hyperledger/wiki) + +[Hyperledger Code of Conduct](https://github.com/hyperledger/hyperledger/wiki/Hyperledger-Project-Code-of-Conduct) + +[Community Calendar](https://github.com/hyperledger/hyperledger/wiki/PublicMeetingCalendar) + +## License +The Hyperledger Project uses the [Apache License Version 2.0](LICENSE) software +license. diff --git a/bddtests/.behaverc b/bddtests/.behaverc index a08b4618932..14a66533fe6 100644 --- a/bddtests/.behaverc +++ b/bddtests/.behaverc @@ -6,6 +6,7 @@ tags=~@issue_767 ~@issue_1565 ~@issue_RBAC_TCERT_With_Attributes ~@sdk + ~@devops ~@preV1 ~@FAB-314 diff --git a/bddtests/bdd-docker/docker-compose-4-consensus-batch-nosnapshotbuffer.yml b/bddtests/bdd-docker/docker-compose-4-consensus-batch-nosnapshotbuffer.yml new file mode 100644 index 00000000000..18a7de5ed1f --- /dev/null +++ b/bddtests/bdd-docker/docker-compose-4-consensus-batch-nosnapshotbuffer.yml @@ -0,0 +1,18 @@ +vp0: + environment: + # The combination of the following two environment variables ensures that a state snapshot will be pulled + # and that the state snapshot buffer will be exhausted + - CORE_PEER_SYNC_STATE_SNAPSHOT_CHANNELSIZE=0 + - CORE_STATETRANSFER_MAXDELTAS=1 +vp1: + environment: + - CORE_PEER_SYNC_STATE_SNAPSHOT_CHANNELSIZE=0 + - CORE_STATETRANSFER_MAXDELTAS=1 +vp2: + environment: + - CORE_PEER_SYNC_STATE_SNAPSHOT_CHANNELSIZE=0 + - CORE_STATETRANSFER_MAXDELTAS=1 +vp3: + environment: + - CORE_PEER_SYNC_STATE_SNAPSHOT_CHANNELSIZE=0 + - CORE_STATETRANSFER_MAXDELTAS=1 diff --git a/bddtests/docker-compose-1-devmode.yml b/bddtests/docker-compose-1-devmode.yml old mode 100755 new mode 100644 diff --git a/bddtests/environment.py b/bddtests/environment.py index 19c330b5921..a0b0415ae43 100644 --- a/bddtests/environment.py +++ b/bddtests/environment.py @@ -19,7 +19,7 @@ def getDockerComposeFileArgsFromYamlFile(compose_yaml): def after_scenario(context, scenario): get_logs = context.config.userdata.get("logs", "N") - if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y"): + if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y" and "compose_containers" in context): print("Scenario {0} failed. 
Getting container logs".format(scenario.name)) file_suffix = "_" + scenario.name.replace(" ", "_") + ".log" # get logs from the peer containers diff --git a/bddtests/peer_basic.feature b/bddtests/peer_basic.feature index cf3b6482bac..f884eff3d52 100644 --- a/bddtests/peer_basic.feature +++ b/bddtests/peer_basic.feature @@ -350,6 +350,7 @@ Feature: Network of Peers # @doNotDecompose # @wip + @devops Scenario: chaincode map single peer content generated ID Given we compose "docker-compose-1.yml" When requesting "/chain" from "vp0" @@ -505,9 +506,11 @@ Feature: Network of Peers | docker-compose-4-consensus-batch.yml | 60 | + #@doNotDecompose + #@wip @issue_680 + @fab380 Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, issue #680 (State transfer) - Given we compose "" And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: | vp0 | @@ -579,8 +582,9 @@ Feature: Network of Peers Examples: Consensus Options - | ComposeFile | WaitTime | - | docker-compose-4-consensus-batch.yml | 60 | + | ComposeFile | WaitTime | + | docker-compose-4-consensus-batch.yml | 60 | + | docker-compose-4-consensus-batch.yml docker-compose-4-consensus-batch-nosnapshotbuffer.yml | 60 | # @doNotDecompose diff --git a/bddtests/peer_logging.feature b/bddtests/peer_logging.feature old mode 100755 new mode 100644 diff --git a/bddtests/steps/peer_basic_impl.py b/bddtests/steps/peer_basic_impl.py index a014ee1cc7e..c287e1be16a 100644 --- a/bddtests/steps/peer_basic_impl.py +++ b/bddtests/steps/peer_basic_impl.py @@ -191,7 +191,7 @@ def deployChainCodeToContainer(context, chaincode, containerName): def createChaincodeSpec(context, chaincode): chaincode = validateChaincodeDictionary(chaincode) - args = to_bytes(prepend(chaincode["constructor"], chaincode["args"])) + args = prepend(chaincode["constructor"], chaincode["args"]) # Create a ChaincodeSpec structure chaincodeSpec = { "type": getChaincodeTypeValue(chaincode["language"]), @@ -317,17 +317,18 @@ def invokeChaincode(context, devopsFunc, functionName, containerName, idGenAlg=N if 'table' in context: # There is ctor arguments args = context.table[0].cells - args = to_bytes(prepend(functionName, args)) + args = prepend(functionName, args) for idx, attr in enumerate(attributes): attributes[idx] = attr.strip() - context.chaincodeSpec['ctorMsg']['args'] = args context.chaincodeSpec['attributes'] = attributes #If idGenAlg is passed then, we still using the deprecated devops API because this parameter can't be passed in the new API. 
if idGenAlg != None: + context.chaincodeSpec['ctorMsg']['args'] = to_bytes(args) invokeUsingDevopsService(context, devopsFunc, functionName, containerName, idGenAlg) else: + context.chaincodeSpec['ctorMsg']['args'] = args invokeUsingChaincodeService(context, devopsFunc, functionName, containerName) def invokeUsingChaincodeService(context, devopsFunc, functionName, containerName): @@ -375,7 +376,7 @@ def invokeMasterChaincode(context, devopsFunc, chaincodeName, functionName, cont args = [] if 'table' in context: args = context.table[0].cells - args = to_bytes(prepend(functionName, args)) + args = prepend(functionName, args) typeGolang = 1 chaincodeSpec = { "type": typeGolang, @@ -585,7 +586,7 @@ def step_impl(context, chaincodeName, functionName): if 'table' in context: # There is ctor arguments args = context.table[0].cells - args = to_bytes(prepend(functionName, args)) + args = prepend(functionName, args) context.chaincodeSpec['ctorMsg']['args'] = args #context.table[0].cells if ('table' in context) else [] # Invoke the POST chaincodeOpPayload = createChaincodeOpPayload("query", context.chaincodeSpec) @@ -617,7 +618,7 @@ def query_common(context, chaincodeName, functionName, value, failOnError): containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData) # Update the chaincodeSpec ctorMsg for invoke - context.chaincodeSpec['ctorMsg']['args'] = to_bytes([functionName, value]) + context.chaincodeSpec['ctorMsg']['args'] = [functionName, value] # Invoke the POST # Make deep copy of chaincodeSpec as we will be changing the SecurityContext per call. chaincodeOpPayload = createChaincodeOpPayload("query", copy.deepcopy(context.chaincodeSpec)) @@ -724,7 +725,7 @@ def step_impl(context): def step_impl(context): gopath = os.environ.get('GOPATH') assert gopath is not None, "Please set GOPATH properly!" - listener = os.path.join(gopath, "src/github.com/hyperledger/fabric/build/docker/bin/block-listener") + listener = os.path.join(gopath, "src/github.com/hyperledger/fabric/build/bin/block-listener") assert os.path.isfile(listener), "Please build the block-listener binary!" 
bdd_test_util.start_background_process(context, "eventlistener", [listener, "-listen-to-rejections"] ) @@ -753,8 +754,12 @@ def to_bytes(strlist): def prepend(elem, l): if l is None or l == "": - return [elem] - return [elem] + l + tail = [] + else: + tail = l + if elem is None: + return tail + return [elem] + tail @given(u'I do nothing') def step_impl(context): diff --git a/bddtests/syschaincode/noop/chaincode_test.go b/bddtests/syschaincode/noop/chaincode_test.go index 14d19ab3123..20431d0fa5b 100644 --- a/bddtests/syschaincode/noop/chaincode_test.go +++ b/bddtests/syschaincode/noop/chaincode_test.go @@ -58,7 +58,7 @@ func TestInvokeExecuteNotEnoughArgs(t *testing.T) { func TestInvokeExecuteOneArgReturnsNothing(t *testing.T) { var noop = SystemChaincode{mockLedger{}} - stub := shim.InitTestStub("arg1") + stub := shim.InitTestStub("transaction") var res, err = noop.Invoke(stub) if res != nil || err != nil { t.Errorf("Invoke.execute has to return nil with no error.") @@ -67,7 +67,7 @@ func TestInvokeExecuteOneArgReturnsNothing(t *testing.T) { func TestInvokeExecuteMoreArgsReturnsError(t *testing.T) { var noop = SystemChaincode{mockLedger{}} - stub := shim.InitTestStub("arg1", "arg2") + stub := shim.InitTestStub("transaction", "arg1") var res, err = noop.Invoke(stub) if res != nil || err == nil { t.Errorf("Invoke.execute has to return error when called with more than one arguments.") diff --git a/consensus/executor/executor.go b/consensus/executor/executor.go index bab2fa2f8e4..9cadbabd5db 100644 --- a/consensus/executor/executor.go +++ b/consensus/executor/executor.go @@ -131,6 +131,7 @@ func (co *coordinatorImpl) ProcessEvent(event events.Event) events.Event { for { err, recoverable := co.stc.SyncToTarget(info.Height-1, info.CurrentBlockHash, et.peers) if err == nil { + logger.Debug("State transfer sync completed, returning") co.skipInProgress = false co.consumer.StateUpdated(et.tag, info) return nil diff --git a/consensus/helper/handler.go b/consensus/helper/handler.go index 2ce99e16b69..3848b80a6f6 100644 --- a/consensus/helper/handler.go +++ b/consensus/helper/handler.go @@ -50,10 +50,9 @@ type ConsensusHandler struct { // NewConsensusHandler constructs a new MessageHandler for the plugin. 
// Is instance of peer.HandlerFactory func NewConsensusHandler(coord peer.MessageHandlerCoordinator, - stream peer.ChatStream, initiatedStream bool, - next peer.MessageHandler) (peer.MessageHandler, error) { + stream peer.ChatStream, initiatedStream bool) (peer.MessageHandler, error) { - peerHandler, err := peer.NewPeerHandler(coord, stream, initiatedStream, nil) + peerHandler, err := peer.NewPeerHandler(coord, stream, initiatedStream) if err != nil { return nil, fmt.Errorf("Error creating PeerHandler: %s", err) } @@ -70,10 +69,8 @@ func NewConsensusHandler(coord peer.MessageHandlerCoordinator, consensusQueueSize = DefaultConsensusQueueSize } - pe, _ := handler.To() - handler.consenterChan = make(chan *util.Message, consensusQueueSize) - getEngineImpl().consensusFan.RegisterChannel(pe.ID, handler.consenterChan) + getEngineImpl().consensusFan.AddFaninChannel(handler.consenterChan) return handler, nil } diff --git a/consensus/helper/helper.go b/consensus/helper/helper.go index e28d9c22097..e8581ade2a0 100644 --- a/consensus/helper/helper.go +++ b/consensus/helper/helper.go @@ -53,16 +53,16 @@ func NewHelper(mhc peer.MessageHandlerCoordinator) *Helper { coordinator: mhc, secOn: viper.GetBool("security.enabled"), secHelper: mhc.GetSecHelper(), - valid: true, // Assume our state is consistent until we are told otherwise, TODO: revisit + valid: true, // Assume our state is consistent until we are told otherwise, actual consensus (pbft) will invalidate this immediately, but noops will not } h.executor = executor.NewImpl(h, h, mhc) - h.executor.Start() return h } func (h *Helper) setConsenter(c consensus.Consenter) { h.consenter = c + h.executor.Start() // The consenter may be expecting a callback from the executor because of state transfer completing, it will miss this if we start the executor too early } // GetNetworkInfo returns the PeerEndpoints of the current validator and the entire validating network diff --git a/consensus/pbft/batch.go b/consensus/pbft/batch.go index 40e7942a102..212ebf9939a 100644 --- a/consensus/pbft/batch.go +++ b/consensus/pbft/batch.go @@ -18,7 +18,6 @@ package pbft import ( "fmt" - "google/protobuf" "time" "github.com/hyperledger/fabric/consensus" @@ -26,6 +25,7 @@ import ( pb "github.com/hyperledger/fabric/protos" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/timestamp" "github.com/op/go-logging" "github.com/spf13/viper" ) @@ -83,8 +83,17 @@ func newObcBatch(id uint64, config *viper.Viper, stack consensus.Stack) *obcBatc etf := events.NewTimerFactoryImpl(op.manager) op.pbft = newPbftCore(id, config, op, etf) op.manager.Start() + blockchainInfoBlob := stack.GetBlockchainInfoBlob() op.externalEventReceiver.manager = op.manager - op.broadcaster = newBroadcaster(id, op.pbft.N, op.pbft.f, stack) + op.broadcaster = newBroadcaster(id, op.pbft.N, op.pbft.f, op.pbft.broadcastTimeout, stack) + op.manager.Queue() <- workEvent(func() { + op.pbft.stateTransfer(&stateUpdateTarget{ + checkpointMessage: checkpointMessage{ + seqNo: op.pbft.lastExec, + id: blockchainInfoBlob, + }, + }) + }) op.batchSize = config.GetInt("general.batchsize") op.batchStore = nil @@ -242,7 +251,7 @@ func (op *obcBatch) sendBatch() events.Event { func (op *obcBatch) txToReq(tx []byte) *Request { now := time.Now() req := &Request{ - Timestamp: &google_protobuf.Timestamp{ + Timestamp: ×tamp.Timestamp{ Seconds: now.Unix(), Nanos: int32(now.UnixNano() % 1000000000), }, diff --git a/consensus/pbft/batch_test.go b/consensus/pbft/batch_test.go index b5a3fdacfbd..8ce2e164487 100644 --- 
a/consensus/pbft/batch_test.go +++ b/consensus/pbft/batch_test.go @@ -24,6 +24,7 @@ import ( "github.com/hyperledger/fabric/consensus/util/events" pb "github.com/hyperledger/fabric/protos" + "github.com/golang/protobuf/proto" "github.com/spf13/viper" ) @@ -76,8 +77,31 @@ func TestNetworkBatch(t *testing.T) { } } -func TestClearOustandingReqsOnStateRecovery(t *testing.T) { - b := newObcBatch(0, loadConfig(), &omniProto{}) +var inertState = &omniProto{ + GetBlockchainInfoImpl: func() *pb.BlockchainInfo { + return &pb.BlockchainInfo{ + CurrentBlockHash: []byte("GENESIS"), + Height: 1, + } + }, + GetBlockchainInfoBlobImpl: func() []byte { + b, _ := proto.Marshal(&pb.BlockchainInfo{ + CurrentBlockHash: []byte("GENESIS"), + Height: 1, + }) + return b + }, + InvalidateStateImpl: func() {}, + ValidateStateImpl: func() {}, + UpdateStateImpl: func(id interface{}, target *pb.BlockchainInfo, peers []*pb.PeerID) {}, +} + +func TestClearOutstandingReqsOnStateRecovery(t *testing.T) { + omni := *inertState + omni.UnicastImpl = func(msg *pb.Message, receiverHandle *pb.PeerID) error { return nil } + b := newObcBatch(0, loadConfig(), &omni) + b.StateUpdated(&checkpointMessage{seqNo: 0, id: inertState.GetBlockchainInfoBlobImpl()}, inertState.GetBlockchainInfoImpl()) + defer b.Close() b.reqStore.storeOutstanding(&Request{}) @@ -98,10 +122,9 @@ func TestClearOustandingReqsOnStateRecovery(t *testing.T) { func TestOutstandingReqsIngestion(t *testing.T) { bs := [3]*obcBatch{} for i := range bs { - omni := &omniProto{ - UnicastImpl: func(ocMsg *pb.Message, peer *pb.PeerID) error { return nil }, - } - bs[i] = newObcBatch(uint64(i), loadConfig(), omni) + omni := *inertState + omni.UnicastImpl = func(ocMsg *pb.Message, peer *pb.PeerID) error { return nil } + bs[i] = newObcBatch(uint64(i), loadConfig(), &omni) defer bs[i].Close() // Have vp1 only deliver messages @@ -115,6 +138,9 @@ func TestOutstandingReqsIngestion(t *testing.T) { } } } + for i := range bs { + bs[i].StateUpdated(&checkpointMessage{seqNo: 0, id: inertState.GetBlockchainInfoBlobImpl()}, inertState.GetBlockchainInfoImpl()) + } err := bs[1].RecvMsg(createTxMsg(1), &pb.PeerID{Name: "vp1"}) if err != nil { @@ -137,10 +163,10 @@ func TestOutstandingReqsIngestion(t *testing.T) { } func TestOutstandingReqsResubmission(t *testing.T) { - omni := &omniProto{} config := loadConfig() config.Set("general.batchsize", 2) - b := newObcBatch(0, config, omni) + omni := *inertState + b := newObcBatch(0, config, &omni) defer b.Close() // The broadcasting threads only cause problems here... 
but this test stalls without them transactionsBroadcast := 0 @@ -160,6 +186,9 @@ func TestOutstandingReqsResubmission(t *testing.T) { return nil } + b.StateUpdated(&checkpointMessage{seqNo: 0, id: inertState.GetBlockchainInfoBlobImpl()}, inertState.GetBlockchainInfoImpl()) + b.manager.Queue() <- nil // Make sure the state update finishes first + reqs := make([]*Request, 8) for i := 0; i < len(reqs); i++ { reqs[i] = createPbftReq(int64(i), 0) @@ -232,11 +261,12 @@ func TestOutstandingReqsResubmission(t *testing.T) { } func TestViewChangeOnPrimarySilence(t *testing.T) { - b := newObcBatch(1, loadConfig(), &omniProto{ - UnicastImpl: func(ocMsg *pb.Message, peer *pb.PeerID) error { return nil }, - SignImpl: func(msg []byte) ([]byte, error) { return msg, nil }, - VerifyImpl: func(peerID *pb.PeerID, signature []byte, message []byte) error { return nil }, - }) + omni := *inertState + omni.UnicastImpl = func(ocMsg *pb.Message, peer *pb.PeerID) error { return nil } // For the checkpoint + omni.SignImpl = func(msg []byte) ([]byte, error) { return msg, nil } + omni.VerifyImpl = func(peerID *pb.PeerID, signature []byte, message []byte) error { return nil } + b := newObcBatch(1, loadConfig(), &omni) + b.StateUpdated(&checkpointMessage{seqNo: 0, id: inertState.GetBlockchainInfoBlobImpl()}, inertState.GetBlockchainInfoImpl()) b.pbft.requestTimeout = 50 * time.Millisecond defer b.Close() @@ -347,7 +377,10 @@ func TestClassicBackToBackStateTransfer(t *testing.T) { } func TestClearBatchStoreOnViewChange(t *testing.T) { - b := newObcBatch(1, loadConfig(), &omniProto{}) + omni := *inertState + omni.UnicastImpl = func(ocMsg *pb.Message, peer *pb.PeerID) error { return nil } // For the checkpoint + b := newObcBatch(1, loadConfig(), &omni) + b.StateUpdated(&checkpointMessage{seqNo: 0, id: inertState.GetBlockchainInfoBlobImpl()}, inertState.GetBlockchainInfoImpl()) defer b.Close() b.batchStore = []*Request{&Request{}} diff --git a/consensus/pbft/broadcast.go b/consensus/pbft/broadcast.go index fb6a5dce030..edcb2dd5021 100644 --- a/consensus/pbft/broadcast.go +++ b/consensus/pbft/broadcast.go @@ -33,10 +33,11 @@ type communicator interface { type broadcaster struct { comm communicator - f int - msgChans map[uint64]chan *sendRequest - closed sync.WaitGroup - closedCh chan struct{} + f int + broadcastTimeout time.Duration + msgChans map[uint64]chan *sendRequest + closed sync.WaitGroup + closedCh chan struct{} } type sendRequest struct { @@ -44,15 +45,16 @@ type sendRequest struct { done chan bool } -func newBroadcaster(self uint64, N int, f int, c communicator) *broadcaster { +func newBroadcaster(self uint64, N int, f int, broadcastTimeout time.Duration, c communicator) *broadcaster { queueSize := 10 // XXX increase after testing chans := make(map[uint64]chan *sendRequest) b := &broadcaster{ - comm: c, - f: f, - msgChans: chans, - closedCh: make(chan struct{}), + comm: c, + f: f, + broadcastTimeout: broadcastTimeout, + msgChans: chans, + closedCh: make(chan struct{}), } for i := 0; i < N; i++ { if uint64(i) == self { @@ -179,7 +181,7 @@ func (b *broadcaster) send(msg *pb.Message, dest *uint64) error { } succeeded := 0 - timer := time.NewTimer(time.Second) // TODO, make this configurable + timer := time.NewTimer(b.broadcastTimeout) // This loop will try to send, until one of: // a) the required number of sends succeed diff --git a/consensus/pbft/broadcast_test.go b/consensus/pbft/broadcast_test.go index f30e3a97ea2..d8563ff3408 100644 --- a/consensus/pbft/broadcast_test.go +++ b/consensus/pbft/broadcast_test.go @@ 
-69,7 +69,7 @@ func TestBroadcast(t *testing.T) { } }() - b := newBroadcaster(1, 4, 1, m) + b := newBroadcaster(1, 4, 1, time.Second, m) msg := &pb.Message{Payload: []byte("hi")} b.Broadcast(msg) @@ -123,7 +123,7 @@ func TestBroadcastStuck(t *testing.T) { } }() - b := newBroadcaster(1, 4, 1, m) + b := newBroadcaster(1, 4, 1, time.Second, m) maxc := 20 for c := 0; c < maxc; c++ { @@ -168,7 +168,7 @@ func TestBroadcastUnicast(t *testing.T) { } }() - b := newBroadcaster(1, 4, 1, m) + b := newBroadcaster(1, 4, 1, time.Second, m) msg := &pb.Message{Payload: []byte("hi")} b.Unicast(msg, 0) @@ -206,7 +206,7 @@ func TestBroadcastAllFail(t *testing.T) { done: make(chan struct{}), } - b := newBroadcaster(1, 4, 1, m) + b := newBroadcaster(1, 4, 1, time.Second, m) maxc := 20 for c := 0; c < maxc; c++ { @@ -228,6 +228,41 @@ func TestBroadcastAllFail(t *testing.T) { } } +func TestBroadcastTimeout(t *testing.T) { + expectTime := 10 * time.Second + deltaTime := 50 * time.Millisecond + m := &mockIndefinitelyStuckComm{ + mockComm: mockComm{ + self: 1, + n: 4, + msgCh: make(chan mockMsg), + }, + done: make(chan struct{}), + } + + b := newBroadcaster(1, 4, 1, expectTime, m) + broadcastDone := make(chan time.Time) + + beginTime := time.Now() + go func() { + b.Broadcast(&pb.Message{Payload: []byte(fmt.Sprintf("%d", 1))}) + broadcastDone <- time.Now() + }() + + checkTime := expectTime + deltaTime + select { + case endTime := <-broadcastDone: + t.Log("Broadcast consume time: ", endTime.Sub(beginTime)) + close(broadcastDone) + close(m.done) + return + case <-time.After(checkTime): + close(broadcastDone) + close(m.done) + t.Fatalf("Broadcast timeout after %v, expected %v", checkTime, expectTime) + } +} + type mockIndefinitelyStuckComm struct { mockComm done chan struct{} @@ -250,7 +285,7 @@ func TestBroadcastIndefinitelyStuck(t *testing.T) { done: make(chan struct{}), } - b := newBroadcaster(1, 4, 1, m) + b := newBroadcaster(1, 4, 1, time.Second, m) broadcastDone := make(chan struct{}) diff --git a/consensus/pbft/config.yaml b/consensus/pbft/config.yaml index ba74553e9f0..2ed06c89e49 100644 --- a/consensus/pbft/config.yaml +++ b/consensus/pbft/config.yaml @@ -59,6 +59,9 @@ general: # Interval to send "keep-alive" null requests. Set to 0 to disable. If enabled, must be greater than request timeout nullrequest: 0s + # How long may a message broadcast take. + broadcast: 1s + ################################################################################ # # SECTION: EXECUTOR diff --git a/consensus/pbft/messages.pb.go b/consensus/pbft/messages.pb.go index a7e9d867967..dd9e697aaac 100644 --- a/consensus/pbft/messages.pb.go +++ b/consensus/pbft/messages.pb.go @@ -29,13 +29,19 @@ package pbft import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "google/protobuf" +import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
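Aside on the regenerated messages.pb.go below: the timestamp type now comes from github.com/golang/protobuf/ptypes/timestamp rather than the old vendored "google/protobuf" package, matching the txToReq change in batch.go earlier in this patch. A small stand-alone sketch of constructing such a timestamp from time.Now(), illustrative only and not part of the patch:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
	now := time.Now()
	// Same construction txToReq uses: whole seconds plus the sub-second remainder in nanoseconds.
	ts := &timestamp.Timestamp{
		Seconds: now.Unix(),
		Nanos:   int32(now.UnixNano() % 1000000000),
	}
	fmt.Printf("seconds=%d nanos=%d\n", ts.Seconds, ts.Nanos)
}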
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type Message struct { // Types that are valid to be assigned to Payload: // *Message_RequestBatch @@ -50,19 +56,20 @@ type Message struct { Payload isMessage_Payload `protobuf_oneof:"payload"` } -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } type isMessage_Payload interface { isMessage_Payload() } type Message_RequestBatch struct { - RequestBatch *RequestBatch `protobuf:"bytes,1,opt,name=request_batch,oneof"` + RequestBatch *RequestBatch `protobuf:"bytes,1,opt,name=request_batch,json=requestBatch,oneof"` } type Message_PrePrepare struct { - PrePrepare *PrePrepare `protobuf:"bytes,2,opt,name=pre_prepare,oneof"` + PrePrepare *PrePrepare `protobuf:"bytes,2,opt,name=pre_prepare,json=prePrepare,oneof"` } type Message_Prepare struct { Prepare *Prepare `protobuf:"bytes,3,opt,name=prepare,oneof"` @@ -74,16 +81,16 @@ type Message_Checkpoint struct { Checkpoint *Checkpoint `protobuf:"bytes,5,opt,name=checkpoint,oneof"` } type Message_ViewChange struct { - ViewChange *ViewChange `protobuf:"bytes,6,opt,name=view_change,oneof"` + ViewChange *ViewChange `protobuf:"bytes,6,opt,name=view_change,json=viewChange,oneof"` } type Message_NewView struct { - NewView *NewView `protobuf:"bytes,7,opt,name=new_view,oneof"` + NewView *NewView `protobuf:"bytes,7,opt,name=new_view,json=newView,oneof"` } type Message_FetchRequestBatch struct { - FetchRequestBatch *FetchRequestBatch `protobuf:"bytes,8,opt,name=fetch_request_batch,oneof"` + FetchRequestBatch *FetchRequestBatch `protobuf:"bytes,8,opt,name=fetch_request_batch,json=fetchRequestBatch,oneof"` } type Message_ReturnRequestBatch struct { - ReturnRequestBatch *RequestBatch `protobuf:"bytes,9,opt,name=return_request_batch,oneof"` + ReturnRequestBatch *RequestBatch `protobuf:"bytes,9,opt,name=return_request_batch,json=returnRequestBatch,oneof"` } func (*Message_RequestBatch) isMessage_Payload() {} @@ -167,8 +174,8 @@ func (m *Message) GetReturnRequestBatch() *RequestBatch { } // XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Message) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _Message_OneofMarshaler, _Message_OneofUnmarshaler, []interface{}{ +func (*Message) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Message_OneofMarshaler, _Message_OneofUnmarshaler, _Message_OneofSizer, []interface{}{ (*Message_RequestBatch)(nil), (*Message_PrePrepare)(nil), (*Message_Prepare)(nil), @@ -317,16 +324,73 @@ func _Message_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer } } +func _Message_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Message) + // payload + switch x := m.Payload.(type) { + case *Message_RequestBatch: + s := proto.Size(x.RequestBatch) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Message_PrePrepare: + s := proto.Size(x.PrePrepare) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Message_Prepare: + s := proto.Size(x.Prepare) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Message_Commit: + s := proto.Size(x.Commit) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Message_Checkpoint: + s := proto.Size(x.Checkpoint) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Message_ViewChange: + s := proto.Size(x.ViewChange) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Message_NewView: + s := proto.Size(x.NewView) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Message_FetchRequestBatch: + s := proto.Size(x.FetchRequestBatch) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Message_ReturnRequestBatch: + s := proto.Size(x.ReturnRequestBatch) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + type Request struct { Timestamp *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"` Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` - ReplicaId uint64 `protobuf:"varint,3,opt,name=replica_id" json:"replica_id,omitempty"` + ReplicaId uint64 `protobuf:"varint,3,opt,name=replica_id,json=replicaId" json:"replica_id,omitempty"` Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` } -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *Request) GetTimestamp() *google_protobuf.Timestamp { if m != nil { @@ -337,15 +401,16 @@ func (m *Request) GetTimestamp() *google_protobuf.Timestamp { type PrePrepare struct { View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"` - SequenceNumber uint64 
`protobuf:"varint,2,opt,name=sequence_number" json:"sequence_number,omitempty"` - BatchDigest string `protobuf:"bytes,3,opt,name=batch_digest" json:"batch_digest,omitempty"` - RequestBatch *RequestBatch `protobuf:"bytes,4,opt,name=request_batch" json:"request_batch,omitempty"` - ReplicaId uint64 `protobuf:"varint,5,opt,name=replica_id" json:"replica_id,omitempty"` + SequenceNumber uint64 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + BatchDigest string `protobuf:"bytes,3,opt,name=batch_digest,json=batchDigest" json:"batch_digest,omitempty"` + RequestBatch *RequestBatch `protobuf:"bytes,4,opt,name=request_batch,json=requestBatch" json:"request_batch,omitempty"` + ReplicaId uint64 `protobuf:"varint,5,opt,name=replica_id,json=replicaId" json:"replica_id,omitempty"` } -func (m *PrePrepare) Reset() { *m = PrePrepare{} } -func (m *PrePrepare) String() string { return proto.CompactTextString(m) } -func (*PrePrepare) ProtoMessage() {} +func (m *PrePrepare) Reset() { *m = PrePrepare{} } +func (m *PrePrepare) String() string { return proto.CompactTextString(m) } +func (*PrePrepare) ProtoMessage() {} +func (*PrePrepare) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *PrePrepare) GetRequestBatch() *RequestBatch { if m != nil { @@ -356,44 +421,48 @@ func (m *PrePrepare) GetRequestBatch() *RequestBatch { type Prepare struct { View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"` - SequenceNumber uint64 `protobuf:"varint,2,opt,name=sequence_number" json:"sequence_number,omitempty"` - BatchDigest string `protobuf:"bytes,3,opt,name=batch_digest" json:"batch_digest,omitempty"` - ReplicaId uint64 `protobuf:"varint,4,opt,name=replica_id" json:"replica_id,omitempty"` + SequenceNumber uint64 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + BatchDigest string `protobuf:"bytes,3,opt,name=batch_digest,json=batchDigest" json:"batch_digest,omitempty"` + ReplicaId uint64 `protobuf:"varint,4,opt,name=replica_id,json=replicaId" json:"replica_id,omitempty"` } -func (m *Prepare) Reset() { *m = Prepare{} } -func (m *Prepare) String() string { return proto.CompactTextString(m) } -func (*Prepare) ProtoMessage() {} +func (m *Prepare) Reset() { *m = Prepare{} } +func (m *Prepare) String() string { return proto.CompactTextString(m) } +func (*Prepare) ProtoMessage() {} +func (*Prepare) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } type Commit struct { View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"` - SequenceNumber uint64 `protobuf:"varint,2,opt,name=sequence_number" json:"sequence_number,omitempty"` - BatchDigest string `protobuf:"bytes,3,opt,name=batch_digest" json:"batch_digest,omitempty"` - ReplicaId uint64 `protobuf:"varint,4,opt,name=replica_id" json:"replica_id,omitempty"` + SequenceNumber uint64 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + BatchDigest string `protobuf:"bytes,3,opt,name=batch_digest,json=batchDigest" json:"batch_digest,omitempty"` + ReplicaId uint64 `protobuf:"varint,4,opt,name=replica_id,json=replicaId" json:"replica_id,omitempty"` } -func (m *Commit) Reset() { *m = Commit{} } -func (m *Commit) String() string { return proto.CompactTextString(m) } -func (*Commit) ProtoMessage() {} +func (m *Commit) Reset() { *m = Commit{} } +func (m *Commit) String() string { return proto.CompactTextString(m) } +func (*Commit) ProtoMessage() {} +func (*Commit) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{4} } type BlockInfo struct { - BlockNumber uint64 `protobuf:"varint,1,opt,name=block_number" json:"block_number,omitempty"` - BlockHash []byte `protobuf:"bytes,2,opt,name=block_hash,proto3" json:"block_hash,omitempty"` + BlockNumber uint64 `protobuf:"varint,1,opt,name=block_number,json=blockNumber" json:"block_number,omitempty"` + BlockHash []byte `protobuf:"bytes,2,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` } -func (m *BlockInfo) Reset() { *m = BlockInfo{} } -func (m *BlockInfo) String() string { return proto.CompactTextString(m) } -func (*BlockInfo) ProtoMessage() {} +func (m *BlockInfo) Reset() { *m = BlockInfo{} } +func (m *BlockInfo) String() string { return proto.CompactTextString(m) } +func (*BlockInfo) ProtoMessage() {} +func (*BlockInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } type Checkpoint struct { - SequenceNumber uint64 `protobuf:"varint,1,opt,name=sequence_number" json:"sequence_number,omitempty"` - ReplicaId uint64 `protobuf:"varint,2,opt,name=replica_id" json:"replica_id,omitempty"` + SequenceNumber uint64 `protobuf:"varint,1,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + ReplicaId uint64 `protobuf:"varint,2,opt,name=replica_id,json=replicaId" json:"replica_id,omitempty"` Id string `protobuf:"bytes,3,opt,name=id" json:"id,omitempty"` } -func (m *Checkpoint) Reset() { *m = Checkpoint{} } -func (m *Checkpoint) String() string { return proto.CompactTextString(m) } -func (*Checkpoint) ProtoMessage() {} +func (m *Checkpoint) Reset() { *m = Checkpoint{} } +func (m *Checkpoint) String() string { return proto.CompactTextString(m) } +func (*Checkpoint) ProtoMessage() {} +func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } type ViewChange struct { View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"` @@ -401,13 +470,14 @@ type ViewChange struct { Cset []*ViewChange_C `protobuf:"bytes,3,rep,name=cset" json:"cset,omitempty"` Pset []*ViewChange_PQ `protobuf:"bytes,4,rep,name=pset" json:"pset,omitempty"` Qset []*ViewChange_PQ `protobuf:"bytes,5,rep,name=qset" json:"qset,omitempty"` - ReplicaId uint64 `protobuf:"varint,6,opt,name=replica_id" json:"replica_id,omitempty"` + ReplicaId uint64 `protobuf:"varint,6,opt,name=replica_id,json=replicaId" json:"replica_id,omitempty"` Signature []byte `protobuf:"bytes,7,opt,name=signature,proto3" json:"signature,omitempty"` } -func (m *ViewChange) Reset() { *m = ViewChange{} } -func (m *ViewChange) String() string { return proto.CompactTextString(m) } -func (*ViewChange) ProtoMessage() {} +func (m *ViewChange) Reset() { *m = ViewChange{} } +func (m *ViewChange) String() string { return proto.CompactTextString(m) } +func (*ViewChange) ProtoMessage() {} +func (*ViewChange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *ViewChange) GetCset() []*ViewChange_C { if m != nil { @@ -432,31 +502,34 @@ func (m *ViewChange) GetQset() []*ViewChange_PQ { // This message should go away and become a checkpoint once replica_id is removed type ViewChange_C struct { - SequenceNumber uint64 `protobuf:"varint,1,opt,name=sequence_number" json:"sequence_number,omitempty"` + SequenceNumber uint64 `protobuf:"varint,1,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` Id string `protobuf:"bytes,3,opt,name=id" json:"id,omitempty"` } -func (m *ViewChange_C) Reset() { *m = ViewChange_C{} } -func (m *ViewChange_C) String() string { return 
proto.CompactTextString(m) } -func (*ViewChange_C) ProtoMessage() {} +func (m *ViewChange_C) Reset() { *m = ViewChange_C{} } +func (m *ViewChange_C) String() string { return proto.CompactTextString(m) } +func (*ViewChange_C) ProtoMessage() {} +func (*ViewChange_C) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } type ViewChange_PQ struct { - SequenceNumber uint64 `protobuf:"varint,1,opt,name=sequence_number" json:"sequence_number,omitempty"` - BatchDigest string `protobuf:"bytes,2,opt,name=batch_digest" json:"batch_digest,omitempty"` + SequenceNumber uint64 `protobuf:"varint,1,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + BatchDigest string `protobuf:"bytes,2,opt,name=batch_digest,json=batchDigest" json:"batch_digest,omitempty"` View uint64 `protobuf:"varint,3,opt,name=view" json:"view,omitempty"` } -func (m *ViewChange_PQ) Reset() { *m = ViewChange_PQ{} } -func (m *ViewChange_PQ) String() string { return proto.CompactTextString(m) } -func (*ViewChange_PQ) ProtoMessage() {} +func (m *ViewChange_PQ) Reset() { *m = ViewChange_PQ{} } +func (m *ViewChange_PQ) String() string { return proto.CompactTextString(m) } +func (*ViewChange_PQ) ProtoMessage() {} +func (*ViewChange_PQ) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 1} } type PQset struct { Set []*ViewChange_PQ `protobuf:"bytes,1,rep,name=set" json:"set,omitempty"` } -func (m *PQset) Reset() { *m = PQset{} } -func (m *PQset) String() string { return proto.CompactTextString(m) } -func (*PQset) ProtoMessage() {} +func (m *PQset) Reset() { *m = PQset{} } +func (m *PQset) String() string { return proto.CompactTextString(m) } +func (*PQset) ProtoMessage() {} +func (*PQset) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *PQset) GetSet() []*ViewChange_PQ { if m != nil { @@ -469,12 +542,13 @@ type NewView struct { View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"` Vset []*ViewChange `protobuf:"bytes,2,rep,name=vset" json:"vset,omitempty"` Xset map[uint64]string `protobuf:"bytes,3,rep,name=xset" json:"xset,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - ReplicaId uint64 `protobuf:"varint,4,opt,name=replica_id" json:"replica_id,omitempty"` + ReplicaId uint64 `protobuf:"varint,4,opt,name=replica_id,json=replicaId" json:"replica_id,omitempty"` } -func (m *NewView) Reset() { *m = NewView{} } -func (m *NewView) String() string { return proto.CompactTextString(m) } -func (*NewView) ProtoMessage() {} +func (m *NewView) Reset() { *m = NewView{} } +func (m *NewView) String() string { return proto.CompactTextString(m) } +func (*NewView) ProtoMessage() {} +func (*NewView) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *NewView) GetVset() []*ViewChange { if m != nil { @@ -491,21 +565,23 @@ func (m *NewView) GetXset() map[uint64]string { } type FetchRequestBatch struct { - BatchDigest string `protobuf:"bytes,1,opt,name=batch_digest" json:"batch_digest,omitempty"` - ReplicaId uint64 `protobuf:"varint,2,opt,name=replica_id" json:"replica_id,omitempty"` + BatchDigest string `protobuf:"bytes,1,opt,name=batch_digest,json=batchDigest" json:"batch_digest,omitempty"` + ReplicaId uint64 `protobuf:"varint,2,opt,name=replica_id,json=replicaId" json:"replica_id,omitempty"` } -func (m *FetchRequestBatch) Reset() { *m = FetchRequestBatch{} } -func (m *FetchRequestBatch) String() string { return proto.CompactTextString(m) } -func (*FetchRequestBatch) ProtoMessage() {} +func (m 
*FetchRequestBatch) Reset() { *m = FetchRequestBatch{} } +func (m *FetchRequestBatch) String() string { return proto.CompactTextString(m) } +func (*FetchRequestBatch) ProtoMessage() {} +func (*FetchRequestBatch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } type RequestBatch struct { Batch []*Request `protobuf:"bytes,1,rep,name=batch" json:"batch,omitempty"` } -func (m *RequestBatch) Reset() { *m = RequestBatch{} } -func (m *RequestBatch) String() string { return proto.CompactTextString(m) } -func (*RequestBatch) ProtoMessage() {} +func (m *RequestBatch) Reset() { *m = RequestBatch{} } +func (m *RequestBatch) String() string { return proto.CompactTextString(m) } +func (*RequestBatch) ProtoMessage() {} +func (*RequestBatch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } func (m *RequestBatch) GetBatch() []*Request { if m != nil { @@ -523,9 +599,10 @@ type BatchMessage struct { Payload isBatchMessage_Payload `protobuf_oneof:"payload"` } -func (m *BatchMessage) Reset() { *m = BatchMessage{} } -func (m *BatchMessage) String() string { return proto.CompactTextString(m) } -func (*BatchMessage) ProtoMessage() {} +func (m *BatchMessage) Reset() { *m = BatchMessage{} } +func (m *BatchMessage) String() string { return proto.CompactTextString(m) } +func (*BatchMessage) ProtoMessage() {} +func (*BatchMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } type isBatchMessage_Payload interface { isBatchMessage_Payload() @@ -535,10 +612,10 @@ type BatchMessage_Request struct { Request *Request `protobuf:"bytes,1,opt,name=request,oneof"` } type BatchMessage_RequestBatch struct { - RequestBatch *RequestBatch `protobuf:"bytes,2,opt,name=request_batch,oneof"` + RequestBatch *RequestBatch `protobuf:"bytes,2,opt,name=request_batch,json=requestBatch,oneof"` } type BatchMessage_PbftMessage struct { - PbftMessage []byte `protobuf:"bytes,3,opt,name=pbft_message,proto3,oneof"` + PbftMessage []byte `protobuf:"bytes,3,opt,name=pbft_message,json=pbftMessage,proto3,oneof"` } type BatchMessage_Complaint struct { Complaint *Request `protobuf:"bytes,4,opt,name=complaint,oneof"` @@ -585,8 +662,8 @@ func (m *BatchMessage) GetComplaint() *Request { } // XXX_OneofFuncs is for the internal use of the proto package. 
-func (*BatchMessage) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _BatchMessage_OneofMarshaler, _BatchMessage_OneofUnmarshaler, []interface{}{ +func (*BatchMessage) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _BatchMessage_OneofMarshaler, _BatchMessage_OneofUnmarshaler, _BatchMessage_OneofSizer, []interface{}{ (*BatchMessage_Request)(nil), (*BatchMessage_RequestBatch)(nil), (*BatchMessage_PbftMessage)(nil), @@ -662,10 +739,119 @@ func _BatchMessage_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.B } } +func _BatchMessage_OneofSizer(msg proto.Message) (n int) { + m := msg.(*BatchMessage) + // payload + switch x := m.Payload.(type) { + case *BatchMessage_Request: + s := proto.Size(x.Request) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchMessage_RequestBatch: + s := proto.Size(x.RequestBatch) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchMessage_PbftMessage: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.PbftMessage))) + n += len(x.PbftMessage) + case *BatchMessage_Complaint: + s := proto.Size(x.Complaint) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + type Metadata struct { SeqNo uint64 `protobuf:"varint,1,opt,name=seqNo" json:"seqNo,omitempty"` } -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func init() { + proto.RegisterType((*Message)(nil), "pbft.message") + proto.RegisterType((*Request)(nil), "pbft.request") + proto.RegisterType((*PrePrepare)(nil), "pbft.pre_prepare") + proto.RegisterType((*Prepare)(nil), "pbft.prepare") + proto.RegisterType((*Commit)(nil), "pbft.commit") + proto.RegisterType((*BlockInfo)(nil), "pbft.block_info") + proto.RegisterType((*Checkpoint)(nil), "pbft.checkpoint") + proto.RegisterType((*ViewChange)(nil), "pbft.view_change") + proto.RegisterType((*ViewChange_C)(nil), "pbft.view_change.C") + proto.RegisterType((*ViewChange_PQ)(nil), "pbft.view_change.PQ") + proto.RegisterType((*PQset)(nil), "pbft.PQset") + proto.RegisterType((*NewView)(nil), "pbft.new_view") + proto.RegisterType((*FetchRequestBatch)(nil), "pbft.fetch_request_batch") + proto.RegisterType((*RequestBatch)(nil), "pbft.request_batch") + proto.RegisterType((*BatchMessage)(nil), "pbft.batch_message") + proto.RegisterType((*Metadata)(nil), "pbft.metadata") +} + +func init() { proto.RegisterFile("messages.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 848 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x56, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0xee, 0x52, 0x94, 0x64, 0x8e, 0x68, 0xd5, 0x5e, 0xfb, 0xc0, 0x0a, 0x35, 0xea, 0xd2, 0xa8, + 0x6b, 0xa3, 0x2d, 0x0d, 0xb8, 0x06, 0x6a, 0x18, 0x3d, 0xd9, 0x2d, 0xea, 
0xa2, 0xa8, 0x21, 0x13, + 0x41, 0x92, 0x1b, 0x41, 0x51, 0x2b, 0x89, 0xb0, 0x44, 0xd2, 0x14, 0xe5, 0xc4, 0x2f, 0x90, 0xe4, + 0x92, 0x17, 0xc8, 0x03, 0xe5, 0x92, 0x73, 0xde, 0x27, 0xbb, 0xc3, 0xa5, 0xf8, 0x23, 0x45, 0xf6, + 0x25, 0xc8, 0x81, 0xc0, 0xee, 0xcc, 0xf7, 0x0d, 0x67, 0xbf, 0x9d, 0xdd, 0x59, 0x68, 0x4f, 0xd8, + 0x74, 0xea, 0x0e, 0xd9, 0xd4, 0x8a, 0xe2, 0x30, 0x09, 0xa9, 0x1a, 0xf5, 0x06, 0x49, 0xe7, 0x87, + 0x61, 0x18, 0x0e, 0xc7, 0xec, 0x08, 0x6d, 0xbd, 0xd9, 0xe0, 0x28, 0xf1, 0x39, 0x2e, 0x71, 0x27, + 0x51, 0x0a, 0x33, 0x5f, 0xa9, 0xd0, 0x94, 0x4c, 0x7a, 0x06, 0xeb, 0x31, 0xbb, 0x9d, 0x71, 0xbf, + 0xd3, 0x73, 0x13, 0x6f, 0x64, 0x90, 0x5d, 0x72, 0xd0, 0x3a, 0xde, 0xb2, 0x44, 0x28, 0xab, 0xe4, + 0xba, 0xfc, 0xc6, 0xd6, 0xa5, 0xe1, 0x5c, 0xcc, 0xe9, 0x09, 0xb4, 0xa2, 0x98, 0x39, 0xfc, 0x8b, + 0xdc, 0x98, 0x19, 0x0a, 0x32, 0x37, 0x53, 0x66, 0xc1, 0xc1, 0x79, 0xc0, 0x87, 0xdd, 0x74, 0x46, + 0x0f, 0xa1, 0x99, 0x31, 0x6a, 0xc8, 0x58, 0x9f, 0x33, 0x24, 0x3a, 0xf3, 0xd3, 0x7d, 0x68, 0x78, + 0xe1, 0x64, 0xe2, 0x27, 0x86, 0x8a, 0x48, 0x3d, 0x45, 0xa6, 0x36, 0x0e, 0x94, 0x5e, 0x7a, 0x0c, + 0xe0, 0x8d, 0x98, 0x77, 0x13, 0x85, 0x7e, 0x90, 0x18, 0x75, 0xc4, 0x6e, 0x48, 0xec, 0xdc, 0x2e, + 0xd2, 0xc8, 0x67, 0x22, 0xf9, 0x3b, 0x9f, 0xbd, 0x70, 0xbc, 0x91, 0x1b, 0x0c, 0x99, 0xd1, 0x28, + 0x26, 0x5f, 0x70, 0x08, 0x96, 0x98, 0x5e, 0xe0, 0x8c, 0xfe, 0x02, 0x6b, 0x01, 0xf7, 0x09, 0x8b, + 0xd1, 0x44, 0x4a, 0x3b, 0xa5, 0x64, 0x56, 0x91, 0x3e, 0x1f, 0x3f, 0xe5, 0x43, 0xfa, 0x1f, 0x6c, + 0x0d, 0x18, 0x17, 0xca, 0x29, 0x2b, 0xbc, 0x86, 0xbc, 0xef, 0x52, 0xde, 0x12, 0x00, 0x0f, 0xb1, + 0x89, 0x66, 0xbb, 0x28, 0xf6, 0x3f, 0xb0, 0x1d, 0xb3, 0x64, 0x16, 0x07, 0x95, 0x68, 0xda, 0xaa, + 0xfd, 0xa2, 0x29, 0xa5, 0x18, 0xe8, 0x5c, 0xe3, 0xfa, 0xbb, 0xf7, 0xe3, 0xd0, 0xed, 0x9b, 0xef, + 0x08, 0x34, 0x25, 0x85, 0x9e, 0x82, 0x36, 0xaf, 0x13, 0x59, 0x04, 0x1d, 0x2b, 0xad, 0x24, 0x2b, + 0xab, 0x24, 0xeb, 0x49, 0x86, 0xb0, 0x73, 0x30, 0x35, 0xe6, 0x01, 0xb1, 0x04, 0x74, 0x3b, 0x9b, + 0xd2, 0x1d, 0x00, 0xbe, 0x93, 0x63, 0xdf, 0x73, 0x1d, 0xbf, 0x8f, 0xbb, 0xad, 0xda, 0x9a, 0xb4, + 0xfc, 0xdb, 0xa7, 0xdf, 0x83, 0x36, 0xf5, 0x87, 0x81, 0xcb, 0x53, 0x64, 0xb8, 0xc3, 0xba, 0x9d, + 0x1b, 0xcc, 0xf7, 0xa4, 0x54, 0x5e, 0x94, 0x82, 0x8a, 0xb2, 0x13, 0x0c, 0x83, 0x63, 0xfa, 0x33, + 0x7c, 0x3b, 0x15, 0xf9, 0x07, 0x1e, 0x73, 0x82, 0xd9, 0xa4, 0xc7, 0x62, 0x4c, 0x41, 0xb5, 0xdb, + 0x99, 0xf9, 0x0a, 0xad, 0xf4, 0x47, 0xd0, 0x51, 0x13, 0xa7, 0xef, 0xf3, 0xe3, 0x92, 0x60, 0x2e, + 0x9a, 0xdd, 0x42, 0xdb, 0x5f, 0x68, 0xe2, 0x02, 0x54, 0x4e, 0x82, 0xfa, 0x59, 0x65, 0x2b, 0xe7, + 0xa0, 0xbc, 0xcc, 0x7a, 0x65, 0x99, 0xe6, 0x1b, 0x32, 0xaf, 0xf8, 0x2f, 0xbe, 0x88, 0x72, 0x2a, + 0x6a, 0x35, 0x95, 0xd7, 0x24, 0x3b, 0x51, 0x5f, 0x3b, 0x93, 0x2b, 0x80, 0xde, 0x38, 0xf4, 0x6e, + 0x1c, 0x3f, 0x18, 0x84, 0x18, 0x0f, 0x67, 0xf2, 0xaf, 0x69, 0x52, 0x2d, 0xb4, 0xc9, 0x5f, 0xee, + 0x64, 0x84, 0x91, 0x3b, 0x1d, 0xc9, 0x42, 0xd3, 0xd0, 0x72, 0xc9, 0x0d, 0x66, 0xbf, 0x78, 0x05, + 0x2c, 0x5b, 0x08, 0x59, 0xba, 0x90, 0x72, 0x96, 0x4a, 0xb5, 0x42, 0xdb, 0xa0, 0xc8, 0xc2, 0xd5, + 0x6c, 0x3e, 0x32, 0xdf, 0xd6, 0x4a, 0xb7, 0xc6, 0x52, 0x11, 0x75, 0x20, 0x23, 0x19, 0x89, 0x8c, + 0x78, 0x26, 0xaa, 0x37, 0x65, 0x42, 0xa1, 0x5a, 0x5e, 0x4c, 0x85, 0x10, 0xd6, 0x85, 0x8d, 0x00, + 0x7a, 0x00, 0x6a, 0x24, 0x80, 0x2a, 0x02, 0xb7, 0x17, 0x81, 0xdd, 0x6b, 0x1b, 0x11, 0x02, 0x79, + 0x2b, 0x90, 0xf5, 0x55, 0x48, 0x81, 0xa8, 0xac, 0xae, 0xb1, 0xf2, 0xfc, 0x35, 0x2b, 0xe7, 0xaf, + 0xf3, 0x27, 0x90, 0x8b, 0xc7, 0x0b, 0x59, 0x51, 0xaa, 0xd3, 0x07, 0xa5, 0x7b, 0xfd, 0x78, 0x7a, + 
0xb5, 0xa0, 0x94, 0xc5, 0x82, 0xca, 0xb4, 0xae, 0xe5, 0x5a, 0x9b, 0x47, 0x50, 0xef, 0x5e, 0x8b, + 0x95, 0xee, 0x43, 0x4d, 0x48, 0x42, 0x56, 0x48, 0x22, 0x00, 0xe6, 0x07, 0x92, 0x5f, 0xe0, 0x4b, + 0x77, 0xef, 0x27, 0x6e, 0x13, 0x91, 0x14, 0x8c, 0xb4, 0xd8, 0x0f, 0x6c, 0x74, 0xd3, 0x5f, 0x41, + 0x7d, 0x99, 0x6f, 0xab, 0x51, 0xee, 0x01, 0xd6, 0x73, 0xee, 0xfa, 0x3b, 0x48, 0xe2, 0x7b, 0x1b, + 0x51, 0x0f, 0x9c, 0x85, 0xce, 0x1f, 0xa0, 0xcd, 0x19, 0x74, 0x03, 0x6a, 0x37, 0xec, 0x5e, 0xe6, + 0x24, 0x86, 0x74, 0x1b, 0xea, 0x77, 0xee, 0x78, 0xc6, 0xa4, 0x28, 0xe9, 0xe4, 0x4c, 0x39, 0x25, + 0xe6, 0xb3, 0xa5, 0x0d, 0x66, 0x41, 0x4c, 0xf2, 0xd0, 0xe9, 0xac, 0xd6, 0xbd, 0x79, 0x52, 0xb9, + 0x0b, 0xe9, 0x1e, 0xd4, 0xb3, 0xe7, 0x41, 0x2d, 0x6f, 0xd9, 0x12, 0x63, 0xa7, 0x3e, 0xf3, 0x23, + 0x81, 0xf5, 0xf4, 0xc7, 0xd9, 0xeb, 0xe2, 0x70, 0xde, 0x5f, 0x64, 0x4b, 0x29, 0x13, 0x45, 0xb3, + 0xcc, 0xfa, 0xcf, 0xc2, 0x43, 0x44, 0x79, 0xfc, 0x43, 0x64, 0x0f, 0x74, 0x81, 0xca, 0x7e, 0x8b, + 0x25, 0xa2, 0x73, 0x54, 0x4b, 0x58, 0xff, 0x97, 0xb9, 0xfc, 0x06, 0x1a, 0xbf, 0xfa, 0xa2, 0xb1, + 0x2b, 0xde, 0x08, 0xea, 0xf2, 0x6c, 0x72, 0x44, 0xb1, 0x4d, 0xee, 0xc2, 0xda, 0x84, 0x25, 0x6e, + 0xdf, 0x4d, 0x5c, 0xb1, 0x19, 0xbc, 0x74, 0xaf, 0x42, 0xb9, 0x41, 0xe9, 0xa4, 0xd7, 0xc0, 0x0e, + 0xf9, 0xfb, 0xa7, 0x00, 0x00, 0x00, 0xff, 0xff, 0x18, 0x21, 0x64, 0x8c, 0x91, 0x09, 0x00, 0x00, +} diff --git a/consensus/pbft/mock_ledger_test.go b/consensus/pbft/mock_ledger_test.go index 14b75750f2c..1a5d127b96e 100644 --- a/consensus/pbft/mock_ledger_test.go +++ b/consensus/pbft/mock_ledger_test.go @@ -17,6 +17,7 @@ limitations under the License. package pbft import ( + "bytes" "fmt" "reflect" "sync" @@ -297,6 +298,27 @@ func (mock *MockLedger) GetBlockHeadMetadata() ([]byte, error) { } func (mock *MockLedger) simulateStateTransfer(info *protos.BlockchainInfo, peers []*protos.PeerID) { + if mock.blockHeight >= info.Height { + blockCursor := info.Height - 1 + validHash := info.CurrentBlockHash + for { + block, ok := mock.blocks[blockCursor] + if !ok { + break + } + hash, _ := mock.HashBlock(block) + if !bytes.Equal(hash, validHash) { + break + } + blockCursor-- + validHash = block.PreviousBlockHash + if blockCursor == ^uint64(0) { + return + } + } + panic(fmt.Sprintf("Asked to skip to a block (%d) which is lower than our current height of %d. 
(Corrupt block at %d with hash %x)", info.Height, mock.blockHeight, blockCursor, validHash)) + } + var remoteLedger consensus.ReadOnlyLedger if len(peers) > 0 { var ok bool @@ -309,9 +331,6 @@ func (mock *MockLedger) simulateStateTransfer(info *protos.BlockchainInfo, peers } fmt.Printf("TEST LEDGER skipping to %+v", info) p := 0 - if mock.blockHeight >= info.Height { - panic(fmt.Sprintf("Asked to skip to a block (%d) which is lower than our current height of %d", info.Height, mock.blockHeight)) - } for n := mock.blockHeight; n < info.Height; n++ { block, err := remoteLedger.GetBlock(n) diff --git a/consensus/pbft/mock_utilities_test.go b/consensus/pbft/mock_utilities_test.go index 21a793c5ac9..81e6ed99f67 100644 --- a/consensus/pbft/mock_utilities_test.go +++ b/consensus/pbft/mock_utilities_test.go @@ -25,8 +25,7 @@ import ( "github.com/hyperledger/fabric/consensus/util/events" "github.com/hyperledger/fabric/core/ledger/statemgmt" - gp "google/protobuf" - + "github.com/golang/protobuf/ptypes/timestamp" pb "github.com/hyperledger/fabric/protos" "github.com/spf13/viper" ) @@ -105,7 +104,7 @@ func createRunningPbftWithManager(id uint64, config *viper.Viper, stack innerSta } func createTx(tag int64) (tx *pb.Transaction) { - txTime := &gp.Timestamp{Seconds: tag, Nanos: 0} + txTime := ×tamp.Timestamp{Seconds: tag, Nanos: 0} tx = &pb.Transaction{Type: pb.Transaction_CHAINCODE_DEPLOY, Timestamp: txTime, Payload: []byte(fmt.Sprint(tag)), diff --git a/consensus/pbft/pbft-core.go b/consensus/pbft/pbft-core.go index 48fde18a54f..943572a5afb 100644 --- a/consensus/pbft/pbft-core.go +++ b/consensus/pbft/pbft-core.go @@ -155,6 +155,7 @@ type pbftCore struct { newViewTimeout time.Duration // progress timeout for new views newViewTimerReason string // what triggered the timer lastNewViewTimeout time.Duration // last timeout we used during this view change + broadcastTimeout time.Duration // progress timeout for broadcast outstandingReqBatches map[string]*RequestBatch // track whether we are waiting for request batches to execute nullRequestTimer events.Timer // timeout triggering a null request @@ -255,6 +256,10 @@ func newPbftCore(id uint64, config *viper.Viper, consumer innerStack, etf events if err != nil { instance.nullRequestTimeout = 0 } + instance.broadcastTimeout, err = time.ParseDuration(config.GetString("general.timeout.broadcast")) + if err != nil { + panic(fmt.Errorf("Cannot parse new broadcast timeout: %s", err)) + } instance.activeView = true instance.replicaCount = instance.N @@ -266,6 +271,7 @@ func newPbftCore(id uint64, config *viper.Viper, consumer innerStack, etf events logger.Infof("PBFT request timeout = %v", instance.requestTimeout) logger.Infof("PBFT view change timeout = %v", instance.newViewTimeout) logger.Infof("PBFT Checkpoint period (K) = %v", instance.K) + logger.Infof("PBFT broadcast timeout = %v", instance.broadcastTimeout) logger.Infof("PBFT Log multiplier = %v", instance.logMultiplier) logger.Infof("PBFT log size (L) = %v", instance.L) if instance.nullRequestTimeout > 0 { @@ -370,11 +376,11 @@ func (instance *pbftCore) ProcessEvent(e events.Event) events.Event { return nil } logger.Infof("Replica %d application caught up via state transfer, lastExec now %d", instance.id, update.seqNo) - // XXX create checkpoint instance.lastExec = update.seqNo instance.moveWatermarks(instance.lastExec) // The watermark movement handles moving this to a checkpoint boundary instance.skipInProgress = false instance.consumer.validateState() + instance.Checkpoint(update.seqNo, update.id) 
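Aside from the new general.timeout.broadcast knob, the substantive pbft-core.go change above is in the stateUpdatedEvent path: a replica that has just caught up via state transfer no longer leaves the old "XXX create checkpoint" placeholder unresolved, but emits a checkpoint for the sequence number it recovered to, so the rest of the network can count it toward the next stable certificate. A minimal restatement of that flow (illustrative only; the call sequence is the one in the hunk above, wrapped in a hypothetical helper):

// Illustrative only: what the stateUpdatedEvent arm of ProcessEvent now does.
func (instance *pbftCore) onStateUpdated(seqNo uint64, id []byte) {
	instance.lastExec = seqNo
	instance.moveWatermarks(seqNo) // rounds down to a checkpoint boundary
	instance.skipInProgress = false
	instance.consumer.validateState()
	instance.Checkpoint(seqNo, id) // new: advertise the recovered state to the other replicas
	instance.executeOutstanding()
}

The TestStateTransferCheckpoint case added further down asserts that exactly one broadcast, the checkpoint, results from this path.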
instance.executeOutstanding() case execDoneEvent: instance.execDoneSync() @@ -1154,17 +1160,40 @@ func (instance *pbftCore) recvCheckpoint(chkpt *Checkpoint) events.Event { instance.checkpointStore[*chkpt] = true + // Track how many different checkpoint values we have for the seqNo in question + diffValues := make(map[string]struct{}) + diffValues[chkpt.Id] = struct{}{} + matching := 0 for testChkpt := range instance.checkpointStore { - if testChkpt.SequenceNumber == chkpt.SequenceNumber && testChkpt.Id == chkpt.Id { - matching++ + if testChkpt.SequenceNumber == chkpt.SequenceNumber { + if testChkpt.Id == chkpt.Id { + matching++ + } else { + if _, ok := diffValues[testChkpt.Id]; !ok { + diffValues[testChkpt.Id] = struct{}{} + } + } } } logger.Debugf("Replica %d found %d matching checkpoints for seqNo %d, digest %s", instance.id, matching, chkpt.SequenceNumber, chkpt.Id) + // If f+2 different values have been observed, we'll never be able to get a stable cert for this seqNo + if count := len(diffValues); count > instance.f+1 { + logger.Panicf("Network unable to find stable certificate for seqNo %d (%d different values observed already)", + chkpt.SequenceNumber, count) + } + if matching == instance.f+1 { - // We do have a weak cert + // We have a weak cert + // If we have generated a checkpoint for this seqNo, make sure we have a match + if ownChkptID, ok := instance.chkpts[chkpt.SequenceNumber]; ok { + if ownChkptID != chkpt.Id { + logger.Panicf("Own checkpoint for seqNo %d (%s) different from weak checkpoint certificate (%s)", + chkpt.SequenceNumber, ownChkptID, chkpt.Id) + } + } instance.witnessCheckpointWeakCert(chkpt) } @@ -1181,8 +1210,7 @@ func (instance *pbftCore) recvCheckpoint(chkpt *Checkpoint) events.Event { // we have reached this checkpoint // Note, this is not divergent from the paper, as the paper requires that // the quorum certificate must contain 2f+1 messages, including its own - chkptID, ok := instance.chkpts[chkpt.SequenceNumber] - if !ok { + if _, ok := instance.chkpts[chkpt.SequenceNumber]; !ok { logger.Debugf("Replica %d found checkpoint quorum for seqNo %d, digest %s, but it has not reached this checkpoint itself yet", instance.id, chkpt.SequenceNumber, chkpt.Id) if instance.skipInProgress { @@ -1200,12 +1228,6 @@ func (instance *pbftCore) recvCheckpoint(chkpt *Checkpoint) events.Event { logger.Debugf("Replica %d found checkpoint quorum for seqNo %d, digest %s", instance.id, chkpt.SequenceNumber, chkpt.Id) - if chkptID != chkpt.Id { - logger.Criticalf("Replica %d generated a checkpoint of %s, but a quorum of the network agrees on %s. 
This is almost definitely non-deterministic chaincode.", - instance.id, chkptID, chkpt.Id) - instance.stateTransfer(nil) - } - instance.moveWatermarks(chkpt.SequenceNumber) return instance.processNewView() diff --git a/consensus/pbft/pbft-core_test.go b/consensus/pbft/pbft-core_test.go index 0517adb92e1..5b3d5d71d5e 100644 --- a/consensus/pbft/pbft-core_test.go +++ b/consensus/pbft/pbft-core_test.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "reflect" + "strconv" "strings" "sync" "testing" @@ -30,12 +31,41 @@ import ( "github.com/op/go-logging" "github.com/hyperledger/fabric/consensus/util/events" + pb "github.com/hyperledger/fabric/protos" ) func init() { logging.SetLevel(logging.DEBUG, "") } +func TestConfigSet(t *testing.T) { + config := loadConfig() + + testKeys := []string{ + "general.mode", + "general.N", + "general.f", + "general.K", + "general.logmultiplier", + "general.batchsize", + "general.byzantine", + "general.viewchangeperiod", + "general.timeout.batch", + "general.timeout.request", + "general.timeout.viewchange", + "general.timeout.resendviewchange", + "general.timeout.nullrequest", + "general.timeout.broadcast", + "executor.queuesize", + } + + for _, key := range testKeys { + if ok := config.IsSet(key); !ok { + t.Errorf("Cannot test env override because \"%s\" does not seem to be set", key) + } + } +} + func TestEnvOverride(t *testing.T) { config := loadConfig() @@ -43,11 +73,6 @@ func TestEnvOverride(t *testing.T) { envName := "CORE_PBFT_GENERAL_MODE" // env override name overrideValue := "overide_test" // value to override default value with - // test key - if ok := config.IsSet("general.mode"); !ok { - t.Fatalf("Cannot test env override because \"%s\" does not seem to be set", key) - } - os.Setenv(envName, overrideValue) // The override config value will cause other calls to fail unless unset. 
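TestConfigSet above and the env-override tests that follow lean on viper's automatic environment mapping: every key under general.* or executor.* can be overridden by an upper-cased, CORE_PBFT_-prefixed variable with dots replaced by underscores. The loader itself is not part of this diff, but it presumably wires viper roughly like this (a sketch, not the actual consensus/pbft config code):

// Rough sketch of the env-to-key mapping the tests above depend on.
config := viper.New()
config.SetEnvPrefix("CORE_PBFT")                        // CORE_PBFT_GENERAL_MODE, ...
config.AutomaticEnv()
config.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) // general.timeout.broadcast -> GENERAL_TIMEOUT_BROADCAST
config.SetConfigName("config")
config.AddConfigPath("./")
if err := config.ReadInConfig(); err != nil {
	panic(fmt.Errorf("error reading pbft config: %s", err))
}
// With CORE_PBFT_GENERAL_TIMEOUT_BROADCAST=1m exported,
// config.GetDuration("general.timeout.broadcast") yields time.Minute,
// which is what TestDurationEnvOverride below verifies.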
defer func() { @@ -66,6 +91,104 @@ func TestEnvOverride(t *testing.T) { } +func TestIntEnvOverride(t *testing.T) { + config := loadConfig() + + tests := []struct { + key string + envName string + overrideValue string + expectValue int + }{ + {"general.N", "CORE_PBFT_GENERAL_N", "8", 8}, + {"general.f", "CORE_PBFT_GENERAL_F", "2", 2}, + {"general.K", "CORE_PBFT_GENERAL_K", "20", 20}, + {"general.logmultiplier", "CORE_PBFT_GENERAL_LOGMULTIPLIER", "6", 6}, + {"general.batchsize", "CORE_PBFT_GENERAL_BATCHSIZE", "200", 200}, + {"general.viewchangeperiod", "CORE_PBFT_GENERAL_VIEWCHANGEPERIOD", "5", 5}, + {"executor.queuesize", "CORE_PBFT_EXECUTOR_QUEUESIZE", "50", 50}, + } + + for _, test := range tests { + os.Setenv(test.envName, test.overrideValue) + + if ok := config.IsSet(test.key); !ok { + t.Errorf("Env override in place, and key \"%s\" is not set", test.key) + } + + configVal := config.GetInt(test.key) + if configVal != test.expectValue { + t.Errorf("Env override in place, expected key \"%s\" to be \"%v\" but instead got \"%d\"", test.key, test.expectValue, configVal) + } + + os.Unsetenv(test.envName) + } +} + +func TestDurationEnvOverride(t *testing.T) { + config := loadConfig() + + tests := []struct { + key string + envName string + overrideValue string + expectValue time.Duration + }{ + {"general.timeout.batch", "CORE_PBFT_GENERAL_TIMEOUT_BATCH", "2s", 2 * time.Second}, + {"general.timeout.request", "CORE_PBFT_GENERAL_TIMEOUT_REQUEST", "4s", 4 * time.Second}, + {"general.timeout.viewchange", "CORE_PBFT_GENERAL_TIMEOUT_VIEWCHANGE", "5s", 5 * time.Second}, + {"general.timeout.resendviewchange", "CORE_PBFT_GENERAL_TIMEOUT_RESENDVIEWCHANGE", "200ms", 200 * time.Millisecond}, + {"general.timeout.nullrequest", "CORE_PBFT_GENERAL_TIMEOUT_NULLREQUEST", "1s", time.Second}, + {"general.timeout.broadcast", "CORE_PBFT_GENERAL_TIMEOUT_BROADCAST", "1m", time.Minute}, + } + + for _, test := range tests { + os.Setenv(test.envName, test.overrideValue) + + if ok := config.IsSet(test.key); !ok { + t.Errorf("Env override in place, and key \"%s\" is not set", test.key) + } + + configVal := config.GetDuration(test.key) + if configVal != test.expectValue { + t.Errorf("Env override in place, expected key \"%s\" to be \"%v\" but instead got \"%v\"", test.key, test.expectValue, configVal) + } + + os.Unsetenv(test.envName) + } +} + +func TestBoolEnvOverride(t *testing.T) { + config := loadConfig() + + tests := []struct { + key string + envName string + overrideValue string + expectValue bool + }{ + {"general.byzantine", "CORE_PBFT_GENERAL_BYZANTINE", "false", false}, + {"general.byzantine", "CORE_PBFT_GENERAL_BYZANTINE", "0", false}, + {"general.byzantine", "CORE_PBFT_GENERAL_BYZANTINE", "true", true}, + {"general.byzantine", "CORE_PBFT_GENERAL_BYZANTINE", "1", true}, + } + + for i, test := range tests { + os.Setenv(test.envName, test.overrideValue) + + if ok := config.IsSet(test.key); !ok { + t.Errorf("Env override in place, and key \"%s\" is not set", test.key) + } + + configVal := config.GetBool(test.key) + if configVal != test.expectValue { + t.Errorf("Test %d Env override in place, expected key \"%s\" to be \"%v\" but instead got \"%v\"", i, test.key, test.expectValue, configVal) + } + + os.Unsetenv(test.envName) + } +} + func TestMaliciousPrePrepare(t *testing.T) { mock := &omniProto{ broadcastImpl: func(msgPayload []byte) { @@ -1058,6 +1181,73 @@ func TestReplicaCrash3(t *testing.T) { } } +// TestReplicaCrash4 simulates the restart with no checkpoints +// in the store because they have been garbage 
collected +// the bug occurs because the low watermark is incorrectly set to +// be zero +func TestReplicaCrash4(t *testing.T) { + validatorCount := 4 + config := loadConfig() + config.Set("general.K", 2) + config.Set("general.logmultiplier", 2) + net := makePBFTNetwork(validatorCount, config) + defer net.stop() + + twoOffline := false + threeOffline := true + net.filterFn = func(src int, dst int, msg []byte) []byte { + if twoOffline && dst == 2 { // 2 is 'offline' + return nil + } + if threeOffline && dst == 3 { // 3 is 'offline' + return nil + } + return msg + } + + for i := int64(1); i <= 8; i++ { + net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(i, uint64(generateBroadcaster(validatorCount))) + } + net.process() // vp0,1,2 should have a stable checkpoint for seqNo 8 + net.process() // this second time is necessary for garbage collection it seams + + // Now vp0,1,2 should be in sync with 8 executions in view 0, and vp4 should be offline + for i, pep := range net.pbftEndpoints { + + if i == 3 { + // 3 is offline for this test + continue + } + + if pep.pbft.view != 0 { + t.Errorf("Expected replica %d to be in view 1, got %d", pep.id, pep.pbft.view) + } + + expectedExecutions := uint64(8) + if pep.sc.executions != expectedExecutions { + t.Errorf("Expected %d executions on replica %d, got %d", expectedExecutions, pep.id, pep.sc.executions) + } + } + + // Create new pbft instances to restore from persistence + for id := 0; id < 3; id++ { + pe := net.pbftEndpoints[id] + config := loadConfig() + config.Set("general.K", "2") + pe.pbft.close() + pe.pbft = newPbftCore(uint64(id), config, pe.sc, events.NewTimerFactoryImpl(pe.manager)) + pe.manager.SetReceiver(pe.pbft) + pe.pbft.N = 4 + pe.pbft.f = (4 - 1) / 3 + pe.pbft.requestTimeout = 200 * time.Millisecond + + expected := uint64(8) + if pe.pbft.h != expected { + t.Errorf("Low watermark should have been %d, got %d", expected, pe.pbft.h) + } + } + +} func TestReplicaPersistQSet(t *testing.T) { persist := make(map[string][]byte) @@ -1514,6 +1704,30 @@ func TestViewChangeDuringExecution(t *testing.T) { } } +func TestStateTransferCheckpoint(t *testing.T) { + broadcasts := 0 + instance := newPbftCore(3, loadConfig(), &omniProto{ + broadcastImpl: func(msg []byte) { + broadcasts++ + }, + validateStateImpl: func() {}, + }, &inertTimerFactory{}) + + id := []byte("My ID") + events.SendEvent(instance, stateUpdatedEvent{ + chkpt: &checkpointMessage{ + seqNo: 10, + id: id, + }, + target: &pb.BlockchainInfo{}, + }) + + if broadcasts != 1 { + t.Fatalf("Should have broadcast a checkpoint after the state transfer finished") + } + +} + func TestStateTransferredToOldPoint(t *testing.T) { skipped := false instance := newPbftCore(3, loadConfig(), &omniProto{ @@ -1563,42 +1777,53 @@ func TestStateNetworkMovesOnDuringSlowStateTransfer(t *testing.T) { } } -// This test is designed to ensure state transfer occurs if our checkpoint does not match a quorum cert -func TestCheckpointDiffersFromQuorum(t *testing.T) { - invalidated := false - skipped := false - instance := newPbftCore(3, loadConfig(), &omniProto{ - invalidateStateImpl: func() { invalidated = true }, - skipToImpl: func(s uint64, id []byte, replicas []uint64) { skipped = true }, - }, &inertTimerFactory{}) +// This test is designed to ensure the peer panics if the value of the weak cert is different from its own checkpoint +func TestCheckpointDiffersFromWeakCert(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Errorf("Weak checkpoint certificate different from own, should have 
panicked.") + } + }() - seqNo := uint64(10) + instance := newPbftCore(3, loadConfig(), &omniProto{}, &inertTimerFactory{}) badChkpt := &Checkpoint{ SequenceNumber: 10, - Id: base64.StdEncoding.EncodeToString([]byte("WRONG")), - ReplicaId: 0, + Id: "WRONG", + ReplicaId: 3, } - instance.chkpts[seqNo] = badChkpt.Id // This is done via the exec path, shortcut it here + instance.chkpts[10] = badChkpt.Id // This is done via the exec path, shortcut it here events.SendEvent(instance, badChkpt) - for i := uint64(1); i <= 3; i++ { + for i := uint64(0); i < 2; i++ { events.SendEvent(instance, &Checkpoint{ SequenceNumber: 10, - Id: base64.StdEncoding.EncodeToString([]byte("CORRECT")), + Id: "CORRECT", ReplicaId: i, }) } - if instance.h != 10 { - t.Fatalf("Replica should have moved its watermarks but did not") + if instance.highStateTarget != nil { + t.Fatalf("State target should not have been updated") } +} - if !instance.skipInProgress { - t.Fatalf("Replica should be attempting state transfer") - } +// This test is designed to ensure the peer panics if it observes > f+1 different checkpoint values for the same seqNo +// This indicates a network that will be unable to move its watermarks and thus progress +func TestNoCheckpointQuorum(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Errorf("More than f+1 different checkpoint values found, should have panicked.") + } + }() + + instance := newPbftCore(3, loadConfig(), &omniProto{}, &inertTimerFactory{}) - if !invalidated || !skipped { - t.Fatalf("Replica should have invalidated its state and skipped") + for i := uint64(0); i < 3; i++ { + events.SendEvent(instance, &Checkpoint{ + SequenceNumber: 10, + Id: strconv.FormatUint(i, 10), + ReplicaId: i, + }) } } diff --git a/consensus/pbft/pbft-persist.go b/consensus/pbft/pbft-persist.go index 52e0f6dfbcd..a5b672ea26d 100644 --- a/consensus/pbft/pbft-persist.go +++ b/consensus/pbft/pbft-persist.go @@ -148,9 +148,11 @@ func (instance *pbftCore) restoreState() { logger.Warningf("Replica %d could not restore reqBatchStore: %s", instance.id, err) } + instance.restoreLastSeqNo() + chkpts, err := instance.consumer.ReadStateSet("chkpt.") if err == nil { - highSeq := uint64(0) + lowWatermark := instance.lastExec // This is safe because we will round down in moveWatermarks for key, id := range chkpts { var seqNo uint64 if _, err = fmt.Sscanf(key, "chkpt.%d", &seqNo); err != nil { @@ -159,20 +161,18 @@ func (instance *pbftCore) restoreState() { idAsString := base64.StdEncoding.EncodeToString(id) logger.Debugf("Replica %d found checkpoint %s for seqNo %d", instance.id, idAsString, seqNo) instance.chkpts[seqNo] = idAsString - if seqNo > highSeq { - highSeq = seqNo + if seqNo < lowWatermark { + lowWatermark = seqNo } } } - instance.moveWatermarks(highSeq) + instance.moveWatermarks(lowWatermark) } else { logger.Warningf("Replica %d could not restore checkpoints: %s", instance.id, err) } - instance.restoreLastSeqNo() - - logger.Infof("Replica %d restored state: view: %d, seqNo: %d, pset: %d, qset: %d, reqBatches: %d, chkpts: %d", - instance.id, instance.view, instance.seqNo, len(instance.pset), len(instance.qset), len(instance.reqBatchStore), len(instance.chkpts)) + logger.Infof("Replica %d restored state: view: %d, seqNo: %d, pset: %d, qset: %d, reqBatches: %d, chkpts: %d h: %d", + instance.id, instance.view, instance.seqNo, len(instance.pset), len(instance.qset), len(instance.reqBatchStore), len(instance.chkpts), instance.h) } func (instance *pbftCore) restoreLastSeqNo() { diff --git 
a/consensus/simplebft/simplebft.pb.go b/consensus/simplebft/simplebft.pb.go index 0c863a8b3e4..85458f60d94 100644 --- a/consensus/simplebft/simplebft.pb.go +++ b/consensus/simplebft/simplebft.pb.go @@ -33,17 +33,24 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type Config struct { N uint64 `protobuf:"varint,1,opt,name=n" json:"n,omitempty"` F uint64 `protobuf:"varint,2,opt,name=f" json:"f,omitempty"` - BatchDurationNsec uint64 `protobuf:"varint,3,opt,name=batch_duration_nsec" json:"batch_duration_nsec,omitempty"` - BatchSizeBytes uint64 `protobuf:"varint,4,opt,name=batch_size_bytes" json:"batch_size_bytes,omitempty"` - RequestTimeoutNsec uint64 `protobuf:"varint,5,opt,name=request_timeout_nsec" json:"request_timeout_nsec,omitempty"` + BatchDurationNsec uint64 `protobuf:"varint,3,opt,name=batch_duration_nsec,json=batchDurationNsec" json:"batch_duration_nsec,omitempty"` + BatchSizeBytes uint64 `protobuf:"varint,4,opt,name=batch_size_bytes,json=batchSizeBytes" json:"batch_size_bytes,omitempty"` + RequestTimeoutNsec uint64 `protobuf:"varint,5,opt,name=request_timeout_nsec,json=requestTimeoutNsec" json:"request_timeout_nsec,omitempty"` } -func (m *Config) Reset() { *m = Config{} } -func (m *Config) String() string { return proto.CompactTextString(m) } -func (*Config) ProtoMessage() {} +func (m *Config) Reset() { *m = Config{} } +func (m *Config) String() string { return proto.CompactTextString(m) } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } type Msg struct { // Types that are valid to be assigned to Type: @@ -58,9 +65,10 @@ type Msg struct { Type isMsg_Type `protobuf_oneof:"type"` } -func (m *Msg) Reset() { *m = Msg{} } -func (m *Msg) String() string { return proto.CompactTextString(m) } -func (*Msg) ProtoMessage() {} +func (m *Msg) Reset() { *m = Msg{} } +func (m *Msg) String() string { return proto.CompactTextString(m) } +func (*Msg) ProtoMessage() {} +func (*Msg) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } type isMsg_Type interface { isMsg_Type() @@ -79,10 +87,10 @@ type Msg_Commit struct { Commit *Subject `protobuf:"bytes,4,opt,name=commit,oneof"` } type Msg_ViewChange struct { - ViewChange *Signed `protobuf:"bytes,5,opt,name=view_change,oneof"` + ViewChange *Signed `protobuf:"bytes,5,opt,name=view_change,json=viewChange,oneof"` } type Msg_NewView struct { - NewView *NewView `protobuf:"bytes,6,opt,name=new_view,oneof"` + NewView *NewView `protobuf:"bytes,6,opt,name=new_view,json=newView,oneof"` } type Msg_Checkpoint struct { Checkpoint *Checkpoint `protobuf:"bytes,7,opt,name=checkpoint,oneof"` @@ -164,8 +172,8 @@ func (m *Msg) GetHello() *Batch { } // XXX_OneofFuncs is for the internal use of the proto package. 
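The simplebft.pb.go changes are a straight regeneration with a newer protoc-gen-go: each message gains a Descriptor() method, the struct tags pick up json= camelCase names, and the oneof plumbing grows a sizer alongside the existing marshaler and unmarshaler. The wire format is unchanged, so callers keep using the generated accessors as before; a small usage sketch, not part of the patch, assuming the standard generated GetRequest accessor:

// Usage sketch (package simplebft): round-trip a Msg through its oneof field.
func exampleRoundTrip() {
	msg := &Msg{Type: &Msg_Request{Request: &Request{Payload: []byte("tx")}}}
	raw, err := proto.Marshal(msg) // sizing now goes through _Msg_OneofSizer
	if err != nil {
		panic(err)
	}
	decoded := &Msg{}
	if err := proto.Unmarshal(raw, decoded); err != nil {
		panic(err)
	}
	if req := decoded.GetRequest(); req != nil {
		fmt.Printf("request payload: %s\n", req.Payload)
	}
}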
-func (*Msg) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _Msg_OneofMarshaler, _Msg_OneofUnmarshaler, []interface{}{ +func (*Msg) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Msg_OneofMarshaler, _Msg_OneofUnmarshaler, _Msg_OneofSizer, []interface{}{ (*Msg_Request)(nil), (*Msg_Preprepare)(nil), (*Msg_Prepare)(nil), @@ -300,32 +308,86 @@ func _Msg_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (b } } +func _Msg_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Msg) + // type + switch x := m.Type.(type) { + case *Msg_Request: + s := proto.Size(x.Request) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Msg_Preprepare: + s := proto.Size(x.Preprepare) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Msg_Prepare: + s := proto.Size(x.Prepare) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Msg_Commit: + s := proto.Size(x.Commit) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Msg_ViewChange: + s := proto.Size(x.ViewChange) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Msg_NewView: + s := proto.Size(x.NewView) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Msg_Checkpoint: + s := proto.Size(x.Checkpoint) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Msg_Hello: + s := proto.Size(x.Hello) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + type Request struct { Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` } -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } type SeqView struct { View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"` Seq uint64 `protobuf:"varint,2,opt,name=seq" json:"seq,omitempty"` } -func (m *SeqView) Reset() { *m = SeqView{} } -func (m *SeqView) String() string { return proto.CompactTextString(m) } -func (*SeqView) ProtoMessage() {} +func (m *SeqView) Reset() { *m = SeqView{} } +func (m *SeqView) String() string { return proto.CompactTextString(m) } +func (*SeqView) ProtoMessage() {} +func (*SeqView) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } type BatchHeader struct { Seq uint64 `protobuf:"varint,1,opt,name=seq" json:"seq,omitempty"` - PrevHash []byte `protobuf:"bytes,2,opt,name=prev_hash,proto3" json:"prev_hash,omitempty"` - DataHash []byte `protobuf:"bytes,3,opt,name=data_hash,proto3" json:"data_hash,omitempty"` + PrevHash []byte `protobuf:"bytes,2,opt,name=prev_hash,json=prevHash,proto3" json:"prev_hash,omitempty"` + DataHash []byte 
`protobuf:"bytes,3,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` } -func (m *BatchHeader) Reset() { *m = BatchHeader{} } -func (m *BatchHeader) String() string { return proto.CompactTextString(m) } -func (*BatchHeader) ProtoMessage() {} +func (m *BatchHeader) Reset() { *m = BatchHeader{} } +func (m *BatchHeader) String() string { return proto.CompactTextString(m) } +func (*BatchHeader) ProtoMessage() {} +func (*BatchHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } type Batch struct { Header []byte `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` @@ -333,9 +395,10 @@ type Batch struct { Signatures map[uint64][]byte `protobuf:"bytes,3,rep,name=signatures" json:"signatures,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *Batch) Reset() { *m = Batch{} } -func (m *Batch) String() string { return proto.CompactTextString(m) } -func (*Batch) ProtoMessage() {} +func (m *Batch) Reset() { *m = Batch{} } +func (m *Batch) String() string { return proto.CompactTextString(m) } +func (*Batch) ProtoMessage() {} +func (*Batch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *Batch) GetSignatures() map[uint64][]byte { if m != nil { @@ -349,9 +412,10 @@ type Preprepare struct { Batch *Batch `protobuf:"bytes,2,opt,name=batch" json:"batch,omitempty"` } -func (m *Preprepare) Reset() { *m = Preprepare{} } -func (m *Preprepare) String() string { return proto.CompactTextString(m) } -func (*Preprepare) ProtoMessage() {} +func (m *Preprepare) Reset() { *m = Preprepare{} } +func (m *Preprepare) String() string { return proto.CompactTextString(m) } +func (*Preprepare) ProtoMessage() {} +func (*Preprepare) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *Preprepare) GetSeq() *SeqView { if m != nil { @@ -372,9 +436,10 @@ type Subject struct { Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` } -func (m *Subject) Reset() { *m = Subject{} } -func (m *Subject) String() string { return proto.CompactTextString(m) } -func (*Subject) ProtoMessage() {} +func (m *Subject) Reset() { *m = Subject{} } +func (m *Subject) String() string { return proto.CompactTextString(m) } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *Subject) GetSeq() *SeqView { if m != nil { @@ -390,9 +455,10 @@ type ViewChange struct { Executed uint64 `protobuf:"varint,4,opt,name=executed" json:"executed,omitempty"` } -func (m *ViewChange) Reset() { *m = ViewChange{} } -func (m *ViewChange) String() string { return proto.CompactTextString(m) } -func (*ViewChange) ProtoMessage() {} +func (m *ViewChange) Reset() { *m = ViewChange{} } +func (m *ViewChange) String() string { return proto.CompactTextString(m) } +func (*ViewChange) ProtoMessage() {} +func (*ViewChange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *ViewChange) GetPset() []*Subject { if m != nil { @@ -413,9 +479,10 @@ type Signed struct { Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` } -func (m *Signed) Reset() { *m = Signed{} } -func (m *Signed) String() string { return proto.CompactTextString(m) } -func (*Signed) ProtoMessage() {} +func (m *Signed) Reset() { *m = Signed{} } +func (m *Signed) String() string { return proto.CompactTextString(m) } +func (*Signed) ProtoMessage() {} +func (*Signed) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{9} } type NewView struct { View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"` @@ -424,9 +491,10 @@ type NewView struct { Batch *Batch `protobuf:"bytes,4,opt,name=batch" json:"batch,omitempty"` } -func (m *NewView) Reset() { *m = NewView{} } -func (m *NewView) String() string { return proto.CompactTextString(m) } -func (*NewView) ProtoMessage() {} +func (m *NewView) Reset() { *m = NewView{} } +func (m *NewView) String() string { return proto.CompactTextString(m) } +func (*NewView) ProtoMessage() {} +func (*NewView) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } func (m *NewView) GetVset() map[uint64]*Signed { if m != nil { @@ -455,6 +523,74 @@ type Checkpoint struct { Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` } -func (m *Checkpoint) Reset() { *m = Checkpoint{} } -func (m *Checkpoint) String() string { return proto.CompactTextString(m) } -func (*Checkpoint) ProtoMessage() {} +func (m *Checkpoint) Reset() { *m = Checkpoint{} } +func (m *Checkpoint) String() string { return proto.CompactTextString(m) } +func (*Checkpoint) ProtoMessage() {} +func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func init() { + proto.RegisterType((*Config)(nil), "simplebft.Config") + proto.RegisterType((*Msg)(nil), "simplebft.Msg") + proto.RegisterType((*Request)(nil), "simplebft.Request") + proto.RegisterType((*SeqView)(nil), "simplebft.SeqView") + proto.RegisterType((*BatchHeader)(nil), "simplebft.BatchHeader") + proto.RegisterType((*Batch)(nil), "simplebft.Batch") + proto.RegisterType((*Preprepare)(nil), "simplebft.Preprepare") + proto.RegisterType((*Subject)(nil), "simplebft.Subject") + proto.RegisterType((*ViewChange)(nil), "simplebft.ViewChange") + proto.RegisterType((*Signed)(nil), "simplebft.Signed") + proto.RegisterType((*NewView)(nil), "simplebft.NewView") + proto.RegisterType((*Checkpoint)(nil), "simplebft.Checkpoint") +} + +func init() { proto.RegisterFile("simplebft.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 727 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x55, 0xdb, 0x6e, 0xd3, 0x4c, + 0x10, 0xfe, 0x5d, 0xe7, 0x38, 0xa9, 0xfe, 0xb6, 0x4b, 0x41, 0x56, 0xe9, 0x45, 0x65, 0x50, 0xe9, + 0x05, 0x4a, 0xab, 0x80, 0x04, 0xaa, 0x84, 0x84, 0x5a, 0x10, 0x15, 0x12, 0x15, 0x72, 0xab, 0x4a, + 0x70, 0x13, 0x39, 0xf6, 0x24, 0x36, 0x4d, 0xec, 0xc4, 0x5e, 0xb7, 0x0d, 0xcf, 0xc0, 0x33, 0xf0, + 0x0c, 0x3c, 0x00, 0x6f, 0xc4, 0x4b, 0xb0, 0x3b, 0xbb, 0xb1, 0x4d, 0x0e, 0x08, 0x29, 0x17, 0x3b, + 0xf3, 0x7d, 0xb3, 0x3b, 0xdf, 0x1c, 0x1c, 0xd8, 0x48, 0xc3, 0xd1, 0x78, 0x88, 0xbd, 0x3e, 0x6f, + 0x8f, 0x93, 0x98, 0xc7, 0xac, 0x99, 0x3b, 0xec, 0x1f, 0x06, 0xd4, 0x4e, 0xe3, 0xa8, 0x1f, 0x0e, + 0xd8, 0x3a, 0x18, 0x91, 0x65, 0xec, 0x19, 0x07, 0x15, 0xc7, 0x88, 0xa4, 0xd5, 0xb7, 0xd6, 0x94, + 0xd5, 0x67, 0x6d, 0xb8, 0xd7, 0x73, 0xb9, 0x17, 0x74, 0xfd, 0x2c, 0x71, 0x79, 0x18, 0x47, 0xdd, + 0x28, 0x45, 0xcf, 0x32, 0x09, 0xdf, 0x22, 0xe8, 0x8d, 0x46, 0xce, 0x05, 0xc0, 0x0e, 0x60, 0x53, + 0xf1, 0xd3, 0xf0, 0x2b, 0x76, 0x7b, 0x53, 0x8e, 0xa9, 0x55, 0x21, 0xf2, 0xff, 0xe4, 0xbf, 0x10, + 0xee, 0x13, 0xe9, 0x65, 0x47, 0xb0, 0x9d, 0xe0, 0x24, 0xc3, 0x94, 0x77, 0x79, 0x38, 0xc2, 0x38, + 0xe3, 0xea, 0xea, 0x2a, 0xb1, 0x99, 0xc6, 0x2e, 0x15, 0x24, 0xef, 0xb6, 0xbf, 0x9b, 0x60, 0x7e, + 0x48, 0x07, 0x22, 0xa7, 0xba, 0x46, 0x29, 0xeb, 0x56, 0x87, 0xb5, 0x0b, 0xa1, 0x8e, 0x42, 0xce, + 0xfe, 0x73, 0x66, 0x24, 0xf6, 0x02, 
0x60, 0x9c, 0xa0, 0xfc, 0xb9, 0x09, 0x92, 0xb4, 0x56, 0xe7, + 0x7e, 0x29, 0xe4, 0x63, 0x0e, 0x8a, 0xa8, 0x12, 0x55, 0x3e, 0x34, 0x8b, 0x32, 0x17, 0x1e, 0xba, + 0xc8, 0x7a, 0x5f, 0xd0, 0xa3, 0x87, 0x66, 0xfc, 0xa7, 0x50, 0xf3, 0xe2, 0xd1, 0x28, 0xe4, 0x24, + 0x79, 0x15, 0x5d, 0x73, 0xd8, 0x73, 0x68, 0xdd, 0x84, 0x78, 0xdb, 0xf5, 0x02, 0x37, 0x1a, 0x20, + 0xe9, 0x6e, 0x75, 0xb6, 0xca, 0x21, 0xe1, 0x20, 0x42, 0x5f, 0xe6, 0x24, 0x79, 0xa7, 0x44, 0x63, + 0x87, 0xd0, 0x88, 0x44, 0x90, 0xf4, 0x58, 0xb5, 0x85, 0x57, 0xce, 0xf1, 0xf6, 0x4a, 0x20, 0x32, + 0xa9, 0x48, 0x1d, 0xa5, 0x7a, 0x2f, 0x40, 0xef, 0x7a, 0x1c, 0x87, 0x11, 0xb7, 0xea, 0x0b, 0xea, + 0x4f, 0x73, 0x50, 0xbe, 0x54, 0x50, 0x45, 0x2b, 0xab, 0x01, 0x0e, 0x87, 0xb1, 0xd5, 0xa0, 0x98, + 0xcd, 0x52, 0xcc, 0x89, 0x6c, 0xa5, 0xa0, 0x2b, 0xc2, 0x49, 0x0d, 0x2a, 0x7c, 0x3a, 0x46, 0xfb, + 0x11, 0xd4, 0x75, 0xf9, 0x99, 0x25, 0x4a, 0xe7, 0x4e, 0x87, 0xb1, 0xeb, 0x53, 0x8f, 0xd6, 0x9d, + 0x99, 0x69, 0x1f, 0x42, 0xfd, 0x02, 0x27, 0x94, 0x1a, 0x83, 0x0a, 0xe9, 0x50, 0xb3, 0x47, 0x67, + 0xb6, 0x09, 0x66, 0x8a, 0x13, 0x3d, 0x80, 0xf2, 0x68, 0x7f, 0x82, 0x96, 0x7a, 0x0f, 0x5d, 0x1f, + 0x93, 0x19, 0xc1, 0xc8, 0x09, 0xec, 0x21, 0x34, 0x45, 0x07, 0x6e, 0xba, 0x81, 0x9b, 0x06, 0x14, + 0xb8, 0xee, 0x34, 0xa4, 0xe3, 0x4c, 0xd8, 0x12, 0xf4, 0x5d, 0xee, 0x2a, 0xd0, 0x54, 0xa0, 0x74, + 0x48, 0xd0, 0xfe, 0x69, 0x40, 0x95, 0xee, 0x66, 0x0f, 0xa0, 0x16, 0xd0, 0xfd, 0x3a, 0x5d, 0x6d, + 0xb1, 0x1d, 0x68, 0xe8, 0xc4, 0x53, 0x71, 0xb5, 0x49, 0x57, 0x6b, 0x9b, 0xbd, 0x06, 0x48, 0x45, + 0x8b, 0x5c, 0x9e, 0x25, 0x62, 0xca, 0x4d, 0x81, 0xb6, 0x3a, 0x7b, 0xf3, 0x55, 0xa2, 0x2e, 0x2a, + 0xca, 0xdb, 0x88, 0x27, 0x53, 0xa7, 0x14, 0xb3, 0xf3, 0x0a, 0x36, 0xe6, 0x60, 0x29, 0xef, 0x1a, + 0xa7, 0x33, 0x79, 0xe2, 0xc8, 0xb6, 0xa1, 0x7a, 0xe3, 0x0e, 0x33, 0xd4, 0xd2, 0x94, 0x71, 0xbc, + 0xf6, 0xd2, 0xb0, 0x3f, 0x03, 0x14, 0xb3, 0xcb, 0x1e, 0x17, 0x85, 0x99, 0x1b, 0x3d, 0x55, 0x6e, + 0x55, 0xac, 0x7d, 0xa8, 0xd2, 0x22, 0xea, 0x3d, 0x58, 0xe8, 0xaa, 0xa3, 0x60, 0xfb, 0x9d, 0x68, + 0x93, 0x1a, 0xd9, 0x7f, 0xbc, 0x58, 0x54, 0xd0, 0x0f, 0x07, 0x72, 0x29, 0x55, 0x9e, 0xda, 0xb2, + 0xbf, 0x19, 0x00, 0x57, 0xc5, 0xfc, 0x2e, 0xeb, 0xf9, 0x3e, 0x54, 0xc6, 0x29, 0x72, 0x2a, 0xf0, + 0xd2, 0xad, 0x71, 0x08, 0x97, 0xbc, 0x89, 0xe4, 0x99, 0xab, 0x79, 0x12, 0x97, 0x4d, 0xc3, 0x3b, + 0xf4, 0x32, 0x8e, 0xbe, 0xfe, 0xf8, 0xe4, 0xb6, 0x7d, 0x0c, 0x35, 0xb5, 0x57, 0x32, 0x13, 0x39, + 0x08, 0xba, 0xe1, 0x74, 0x66, 0xbb, 0xd0, 0xcc, 0xdb, 0xa3, 0x75, 0x14, 0x0e, 0xfb, 0x97, 0x01, + 0x75, 0xbd, 0x61, 0x4b, 0x75, 0x1c, 0x09, 0x5f, 0xa1, 0x63, 0x77, 0x71, 0x2f, 0xdb, 0x57, 0x02, + 0x56, 0x63, 0x40, 0x4c, 0xa9, 0xe8, 0x4e, 0x29, 0x32, 0x56, 0x29, 0xba, 0x53, 0x3c, 0xdd, 0xb5, + 0xca, 0x5f, 0xbb, 0xb6, 0xf3, 0x1e, 0x9a, 0xf9, 0x13, 0x4b, 0x46, 0xe9, 0x49, 0x79, 0x94, 0x96, + 0x7d, 0x6c, 0xca, 0xd3, 0x75, 0x09, 0x50, 0x7c, 0x1b, 0x96, 0xac, 0xdd, 0x8a, 0x86, 0xff, 0x59, + 0x43, 0x73, 0xae, 0x86, 0xbd, 0x1a, 0xfd, 0x13, 0x3d, 0xfb, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x63, + 0xf4, 0x2e, 0x0e, 0x9c, 0x06, 0x00, 0x00, +} diff --git a/consensus/util/messagefan.go b/consensus/util/messagefan.go index 4d4a66edcd0..dad3b0e962a 100644 --- a/consensus/util/messagefan.go +++ b/consensus/util/messagefan.go @@ -38,7 +38,7 @@ type Message struct { // MessageFan contains the reference to the peer's MessageHandlerCoordinator type MessageFan struct { - ins map[*pb.PeerID]<-chan *Message + ins []<-chan *Message out chan *Message lock sync.Mutex } @@ -46,35 +46,38 @@ type MessageFan struct { // NewMessageFan will return an 
initialized MessageFan func NewMessageFan() *MessageFan { return &MessageFan{ - ins: make(map[*pb.PeerID]<-chan *Message), + ins: []<-chan *Message{}, out: make(chan *Message), } } -// RegisterChannel is intended to be invoked by Handler to add a channel to be fan-ed in -func (fan *MessageFan) RegisterChannel(sender *pb.PeerID, channel <-chan *Message) { +// AddFaninChannel is intended to be invoked by Handler to add a channel to be fan-ed in +func (fan *MessageFan) AddFaninChannel(channel <-chan *Message) { fan.lock.Lock() defer fan.lock.Unlock() - if _, ok := fan.ins[sender]; ok { - logger.Warningf("Received duplicate connection from %v, switching to new connection", sender) - } else { - logger.Infof("Registering connection from %v", sender) + for _, c := range fan.ins { + if c == channel { + logger.Warningf("Received duplicate connection") + return + } } - fan.ins[sender] = channel + fan.ins = append(fan.ins, channel) go func() { for msg := range channel { fan.out <- msg } - logger.Infof("Connection from peer %v terminated", sender) - fan.lock.Lock() defer fan.lock.Unlock() - delete(fan.ins, sender) + for i, c := range fan.ins { + if c == channel { + fan.ins = append(fan.ins[:i], fan.ins[i+1:]...) + } + } }() } diff --git a/consensus/util/messagefan_test.go b/consensus/util/messagefan_test.go index b30d983cc75..ebf19e2f9ed 100644 --- a/consensus/util/messagefan_test.go +++ b/consensus/util/messagefan_test.go @@ -17,11 +17,8 @@ limitations under the License. package util import ( - "fmt" "testing" "time" - - pb "github.com/hyperledger/fabric/protos" ) func TestFanIn(t *testing.T) { @@ -32,8 +29,7 @@ func TestFanIn(t *testing.T) { for i := 0; i < Channels; i++ { c := make(chan *Message, Messages/2) - pid := &pb.PeerID{Name: fmt.Sprintf("%d", i)} - fh.RegisterChannel(pid, c) + fh.AddFaninChannel(c) go func() { for j := 0; j < Messages; j++ { c <- &Message{} @@ -67,8 +63,7 @@ func TestFanIn(t *testing.T) { func TestFanChannelClose(t *testing.T) { fh := NewMessageFan() c := make(chan *Message) - pid := &pb.PeerID{Name: "1"} - fh.RegisterChannel(pid, c) + fh.AddFaninChannel(c) close(c) for i := 0; i < 100; i++ { diff --git a/core/admin.go b/core/admin.go index 7221d2e4af4..2fa31debf70 100644 --- a/core/admin.go +++ b/core/admin.go @@ -24,8 +24,7 @@ import ( "github.com/spf13/viper" "golang.org/x/net/context" - "google/protobuf" - + "github.com/golang/protobuf/ptypes/empty" pb "github.com/hyperledger/fabric/protos" ) @@ -55,21 +54,21 @@ func worker(id int, die chan struct{}) { } // GetStatus reports the status of the server -func (*ServerAdmin) GetStatus(context.Context, *google_protobuf.Empty) (*pb.ServerStatus, error) { +func (*ServerAdmin) GetStatus(context.Context, *empty.Empty) (*pb.ServerStatus, error) { status := &pb.ServerStatus{Status: pb.ServerStatus_STARTED} log.Debugf("returning status: %s", status) return status, nil } // StartServer starts the server -func (*ServerAdmin) StartServer(context.Context, *google_protobuf.Empty) (*pb.ServerStatus, error) { +func (*ServerAdmin) StartServer(context.Context, *empty.Empty) (*pb.ServerStatus, error) { status := &pb.ServerStatus{Status: pb.ServerStatus_STARTED} log.Debugf("returning status: %s", status) return status, nil } // StopServer stops the server -func (*ServerAdmin) StopServer(context.Context, *google_protobuf.Empty) (*pb.ServerStatus, error) { +func (*ServerAdmin) StopServer(context.Context, *empty.Empty) (*pb.ServerStatus, error) { status := &pb.ServerStatus{Status: pb.ServerStatus_STOPPED} log.Debugf("returning status: %s", 
status) diff --git a/core/chaincode/chaincode_support.go b/core/chaincode/chaincode_support.go index 0acb5f38780..53e5e3b9cfe 100644 --- a/core/chaincode/chaincode_support.go +++ b/core/chaincode/chaincode_support.go @@ -25,6 +25,7 @@ import ( "time" "github.com/golang/protobuf/proto" + logging "github.com/op/go-logging" "github.com/spf13/viper" "golang.org/x/net/context" @@ -35,6 +36,7 @@ import ( "github.com/hyperledger/fabric/core/crypto" "github.com/hyperledger/fabric/core/ledger" ledgernext "github.com/hyperledger/fabric/core/ledgernext" + "github.com/hyperledger/fabric/flogging" pb "github.com/hyperledger/fabric/protos" ) @@ -151,6 +153,21 @@ func NewChaincodeSupport(chainname ChainName, getPeerEndpoint func() (*pb.PeerEn s.keepalive = time.Duration(t) * time.Second } + viper.SetEnvPrefix("CORE") + viper.AutomaticEnv() + replacer := strings.NewReplacer(".", "_") + viper.SetEnvKeyReplacer(replacer) + + chaincodeLogLevelString := viper.GetString("logging.chaincode") + chaincodeLogLevel, err := logging.LogLevel(chaincodeLogLevelString) + + if err == nil { + s.chaincodeLogLevel = chaincodeLogLevel.String() + } else { + chaincodeLogger.Infof("chaincode logging level %s is invalid. defaulting to %s\n", chaincodeLogLevelString, flogging.DefaultLoggingLevel().String()) + s.chaincodeLogLevel = flogging.DefaultLoggingLevel().String() + } + return s } @@ -176,6 +193,7 @@ type ChaincodeSupport struct { peerTLSKeyFile string peerTLSSvrHostOrd string keepalive time.Duration + chaincodeLogLevel string } // DuplicateChaincodeHandlerError returned if attempt to register same chaincodeID while a stream already exists. @@ -294,6 +312,11 @@ func (chaincodeSupport *ChaincodeSupport) getArgsAndEnv(cID *pb.ChaincodeID, cLa } else { envs = append(envs, "CORE_PEER_TLS_ENABLED=false") } + + if chaincodeSupport.chaincodeLogLevel != "" { + envs = append(envs, "CORE_LOGGING_CHAINCODE="+chaincodeSupport.chaincodeLogLevel) + } + switch cLang { case pb.ChaincodeSpec_GOLANG, pb.ChaincodeSpec_CAR: //chaincode executable will be same as the name of the chaincode diff --git a/core/chaincode/chaincodetest.yaml b/core/chaincode/chaincodetest.yaml index 63b7983de97..18e7483632e 100644 --- a/core/chaincode/chaincodetest.yaml +++ b/core/chaincode/chaincodetest.yaml @@ -165,15 +165,6 @@ peer: # networkId: test networkId: dev - Dockerfile: | - from hyperledger/fabric-baseimage:latest - # Copy GOPATH src and install Peer - COPY src $GOPATH/src - RUN mkdir -p /var/hyperledger/db - WORKDIR $GOPATH/src/github.com/hyperledger/fabric/peer/ - RUN CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" go install && cp $GOPATH/src/github.com/hyperledger/fabric/peer/core.yaml $GOPATH/bin - - # The Address this Peer will listen on listenAddress: 0.0.0.0:21212 # The Address this Peer will bind to for providing services @@ -363,17 +354,16 @@ chaincode: # This is the basis for the Golang Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - from hyperledger/fabric-baseimage - #from utxo:0.1.0 - COPY src $GOPATH/src - WORKDIR $GOPATH + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + COPY src $GOPATH/src + WORKDIR $GOPATH car: # This is the basis for the CAR Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - FROM hyperledger/fabric-baseimage + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) # timeout in millisecs for starting up a container and waiting for Register # to come through. 
1sec should be plenty for chaincode unit tests diff --git a/core/chaincode/exectransaction_test.go b/core/chaincode/exectransaction_test.go index f72983b9aab..381614de2df 100644 --- a/core/chaincode/exectransaction_test.go +++ b/core/chaincode/exectransaction_test.go @@ -61,7 +61,7 @@ func initMemSrvc() (net.Listener, error) { ca.CacheConfiguration() // Cache configuration aca := ca.NewACA() - eca := ca.NewECA() + eca := ca.NewECA(aca) tca := ca.NewTCA(eca) tlsca := ca.NewTLSCA(eca) @@ -286,8 +286,8 @@ func invoke(ctx context.Context, spec *pb.ChaincodeSpec, typ pb.Transaction_Type ledger, _ := ledger.GetLedger() ledger.BeginTxBatch("1") retval, ccevt, execErr = Execute(ctx, GetChain(DefaultChain), transaction) - if err != nil { - return nil, uuid, nil, fmt.Errorf("Error invoking chaincode: %s ", err) + if execErr != nil { + return nil, uuid, nil, fmt.Errorf("Error invoking chaincode: %s ", execErr) } ledger.CommitTxBatch("1", []*pb.Transaction{transaction}, nil, nil) } @@ -376,9 +376,7 @@ func TestHTTPExecuteDeployTransaction(t *testing.T) { // itself or it won't be downloaded because it will be found // in GOPATH, which would defeat the test testDBWrapper.CleanDB(t) - //executeDeployTransaction(t, "http://github.com/hyperledger/fabric-test-resources/examples/chaincode/go/chaincode_example01") - // forked the above until the ChaincodeStubInterface change is accepted into the fabric-test-resources project - executeDeployTransaction(t, "http://github.com/gabre/fabric-test-resources/examples/chaincode/go/chaincode_example01") + executeDeployTransaction(t, "http://gopkg.in/mastersingh24/fabric-test-resources.v1") } // Check the correctness of the final state after transaction execution. @@ -829,6 +827,18 @@ func TestChaincodeInvokeChaincode(t *testing.T) { go grpcServer.Serve(lis) + err = chaincodeInvokeChaincode(t, "") + if err != nil { + t.Fail() + t.Logf("Failed chaincode invoke chaincode : %s", err) + closeListenerAndSleep(lis) + return + } + + closeListenerAndSleep(lis) +} + +func chaincodeInvokeChaincode(t *testing.T, user string) (err error) { var ctxt = context.Background() // Deploy first chaincode @@ -838,7 +848,7 @@ func TestChaincodeInvokeChaincode(t *testing.T) { f := "init" args := util.ToChaincodeArgs(f, "a", "100", "b", "200") - spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Args: args}} + spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user} _, err = deploy(ctxt, spec1) chaincodeID1 := spec1.ChaincodeID.Name @@ -846,7 +856,6 @@ func TestChaincodeInvokeChaincode(t *testing.T) { t.Fail() t.Logf("Error initializing chaincode %s(%s)", chaincodeID1, err) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1}) - closeListenerAndSleep(lis) return } @@ -861,7 +870,7 @@ func TestChaincodeInvokeChaincode(t *testing.T) { f = "init" args = util.ToChaincodeArgs(f, "e", "0") - spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}} + spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user} _, err = deploy(ctxt, spec2) chaincodeID2 := spec2.ChaincodeID.Name @@ -870,7 +879,6 @@ func TestChaincodeInvokeChaincode(t *testing.T) { t.Logf("Error initializing chaincode %s(%s)", chaincodeID2, err) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1}) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2}) - 
closeListenerAndSleep(lis) return } @@ -880,7 +888,7 @@ func TestChaincodeInvokeChaincode(t *testing.T) { f = "invoke" args = util.ToChaincodeArgs(f, "e", "1") - spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}} + spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Args: args}, SecureContext: user} // Invoke chaincode var uuid string _, uuid, _, err = invoke(ctxt, spec2, pb.Transaction_CHAINCODE_INVOKE) @@ -890,7 +898,6 @@ func TestChaincodeInvokeChaincode(t *testing.T) { t.Logf("Error invoking <%s>: %s", chaincodeID2, err) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1}) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2}) - closeListenerAndSleep(lis) return } @@ -901,13 +908,67 @@ func TestChaincodeInvokeChaincode(t *testing.T) { t.Logf("Incorrect final state after transaction for <%s>: %s", chaincodeID1, err) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1}) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2}) - closeListenerAndSleep(lis) return } GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1}) GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2}) - closeListenerAndSleep(lis) + + return +} + +func TestChaincodeInvokeChaincodeWithSec(t *testing.T) { + testDBWrapper.CleanDB(t) + viper.Set("security.enabled", "true") + + //Initialize crypto + if err := crypto.Init(); err != nil { + panic(fmt.Errorf("Failed initializing the crypto layer [%s]", err)) + } + + //set paths for memberservice to pick up + viper.Set("peer.fileSystemPath", filepath.Join(os.TempDir(), "hyperledger", "production")) + viper.Set("server.rootpath", filepath.Join(os.TempDir(), "ca")) + + var err error + var memSrvcLis net.Listener + if memSrvcLis, err = initMemSrvc(); err != nil { + t.Fail() + t.Logf("Error registering user %s", err) + return + } + + time.Sleep(2 * time.Second) + + var peerLis net.Listener + if peerLis, err = initPeer(); err != nil { + finitMemSrvc(memSrvcLis) + t.Fail() + t.Logf("Error registering user %s", err) + return + } + + if err = crypto.RegisterClient("jim", nil, "jim", "6avZQLwcUe9b"); err != nil { + finitMemSrvc(memSrvcLis) + finitPeer(peerLis) + t.Fail() + t.Logf("Error registering user %s", err) + return + } + + //login as jim and test chaincode-chaincode interaction with security + if err = chaincodeInvokeChaincode(t, "jim"); err != nil { + finitMemSrvc(memSrvcLis) + finitPeer(peerLis) + t.Fail() + t.Logf("Error executing test %s", err) + return + } + + //cleanup + finitMemSrvc(memSrvcLis) + finitPeer(peerLis) + } // Test the execution of a chaincode that invokes another chaincode with wrong parameters. 
Should receive error from diff --git a/core/chaincode/handler.go b/core/chaincode/handler.go index 0d725c2a644..b7810e589c9 100644 --- a/core/chaincode/handler.go +++ b/core/chaincode/handler.go @@ -168,9 +168,9 @@ func (handler *Handler) deleteRangeQueryIterator(txContext *transactionContext, delete(txContext.rangeQueryIteratorMap, txid) } -//THIS CAN BE REMOVED ONCE WE SUPPORT CONFIDENTIALITY WITH CC-CALLING-CC -//we dissallow chaincode-chaincode interactions till confidentiality implications are understood -func (handler *Handler) canCallChaincode(txid string) *pb.ChaincodeMessage { +//THIS CAN BE REMOVED ONCE WE FULL SUPPORT (Invoke and Query) CONFIDENTIALITY WITH CC-CALLING-CC +//Only invocation are allowed, not queries +func (handler *Handler) canCallChaincode(txid string, isQuery bool) *pb.ChaincodeMessage { secHelper := handler.chaincodeSupport.getSecHelper() if secHelper == nil { return nil @@ -183,7 +183,9 @@ func (handler *Handler) canCallChaincode(txid string) *pb.ChaincodeMessage { } else if txctx.transactionSecContext == nil { errMsg = fmt.Sprintf("[%s]Error transaction context is nil while checking for confidentiality. Sending %s", shorttxid(txid), pb.ChaincodeMessage_ERROR) } else if txctx.transactionSecContext.ConfidentialityLevel != pb.ConfidentialityLevel_PUBLIC { - errMsg = fmt.Sprintf("[%s]Error chaincode-chaincode interactions not supported for with privacy enabled. Sending %s", shorttxid(txid), pb.ChaincodeMessage_ERROR) + if isQuery { + errMsg = fmt.Sprintf("[%s]Error chaincode-chaincode interactions not supported for with privacy enabled. Sending %s", shorttxid(txid), pb.ChaincodeMessage_ERROR) + } } if errMsg != "" { @@ -216,10 +218,12 @@ func (handler *Handler) encryptOrDecrypt(encrypt bool, txid string, payload []by var err error if txctx.transactionSecContext.Type == pb.Transaction_CHAINCODE_DEPLOY { if enc, err = secHelper.GetStateEncryptor(handler.deployTXSecContext, handler.deployTXSecContext); err != nil { + chaincodeLogger.Errorf("error getting crypto encryptor for deploy tx :%s", err) return nil, fmt.Errorf("error getting crypto encryptor for deploy tx :%s", err) } } else if txctx.transactionSecContext.Type == pb.Transaction_CHAINCODE_INVOKE || txctx.transactionSecContext.Type == pb.Transaction_CHAINCODE_QUERY { if enc, err = secHelper.GetStateEncryptor(handler.deployTXSecContext, txctx.transactionSecContext); err != nil { + chaincodeLogger.Errorf("error getting crypto encryptor %s", err) return nil, fmt.Errorf("error getting crypto encryptor %s", err) } } else { @@ -1073,7 +1077,9 @@ func (handler *Handler) enterBusyState(e *fsm.Event, state string) { } } else if msg.Type.String() == pb.ChaincodeMessage_INVOKE_CHAINCODE.String() { //check and prohibit C-call-C for CONFIDENTIAL txs - if triggerNextStateMsg = handler.canCallChaincode(msg.Txid); triggerNextStateMsg != nil { + chaincodeLogger.Debugf("[%s] C-call-C", shorttxid(msg.Txid)) + + if triggerNextStateMsg = handler.canCallChaincode(msg.Txid, false); triggerNextStateMsg != nil { return } chaincodeSpec := &pb.ChaincodeSpec{} @@ -1087,12 +1093,21 @@ func (handler *Handler) enterBusyState(e *fsm.Event, state string) { // Get the chaincodeID to invoke newChaincodeID := chaincodeSpec.ChaincodeID.Name + chaincodeLogger.Debugf("[%s] C-call-C %s", shorttxid(msg.Txid), newChaincodeID) // Create the transaction object chaincodeInvocationSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: chaincodeSpec} transaction, _ := pb.NewChaincodeExecute(chaincodeInvocationSpec, msg.Txid, pb.Transaction_CHAINCODE_INVOKE) - 
// Launch the new chaincode if not already running + tsc := handler.getTxContext(msg.Txid).transactionSecContext + + transaction.Nonce = tsc.Nonce + transaction.ConfidentialityLevel = tsc.ConfidentialityLevel + transaction.ConfidentialityProtocolVersion = tsc.ConfidentialityProtocolVersion + transaction.Metadata = tsc.Metadata + transaction.Cert = tsc.Cert + + // cd the new chaincode if not already running _, chaincodeInput, launchErr := handler.chaincodeSupport.Launch(context.Background(), transaction) if launchErr != nil { payload := []byte(launchErr.Error()) @@ -1244,7 +1259,7 @@ func (handler *Handler) initializeSecContext(tx, depTx *pb.Transaction) error { return nil } -func (handler *Handler) setChaincodeSecurityContext(tx *pb.Transaction, msg *pb.ChaincodeMessage) error { +func (handler *Handler) setChaincodeSecurityContext(tx, depTx *pb.Transaction, msg *pb.ChaincodeMessage) error { chaincodeLogger.Debug("setting chaincode security context...") if msg.SecurityContext == nil { msg.SecurityContext = &pb.ChaincodeSecurityContext{} @@ -1275,6 +1290,13 @@ func (handler *Handler) setChaincodeSecurityContext(tx *pb.Transaction, msg *pb. return err } + msg.SecurityContext.Payload = ctorMsgRaw + // TODO: add deploy metadata + if depTx != nil { + msg.SecurityContext.ParentMetadata = depTx.Metadata + } else { + msg.SecurityContext.ParentMetadata = handler.deployTXSecContext.Metadata + } msg.SecurityContext.Payload = ctorMsgRaw msg.SecurityContext.TxTimestamp = tx.Timestamp } @@ -1316,7 +1338,7 @@ func (handler *Handler) initOrReady(ctxt context.Context, txid string, initArgs } //if security is disabled the context elements will just be nil - if err := handler.setChaincodeSecurityContext(tx, ccMsg); err != nil { + if err := handler.setChaincodeSecurityContext(tx, depTx, ccMsg); err != nil { return nil, err } @@ -1344,7 +1366,7 @@ func (handler *Handler) handleQueryChaincode(msg *pb.ChaincodeMessage) { }() //check and prohibit C-call-C for CONFIDENTIAL txs - if serialSendMsg = handler.canCallChaincode(msg.Txid); serialSendMsg != nil { + if serialSendMsg = handler.canCallChaincode(msg.Txid, true); serialSendMsg != nil { return } @@ -1364,6 +1386,16 @@ func (handler *Handler) handleQueryChaincode(msg *pb.ChaincodeMessage) { chaincodeInvocationSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: chaincodeSpec} transaction, _ := pb.NewChaincodeExecute(chaincodeInvocationSpec, msg.Txid, pb.Transaction_CHAINCODE_QUERY) + tsc := handler.getTxContext(msg.Txid).transactionSecContext + + transaction.Nonce = tsc.Nonce + transaction.ConfidentialityLevel = tsc.ConfidentialityLevel + transaction.ConfidentialityProtocolVersion = tsc.ConfidentialityProtocolVersion + transaction.Metadata = tsc.Metadata + transaction.Cert = tsc.Cert + + chaincodeLogger.Debugf("[%s]Invoking another chaincode", shorttxid(msg.Txid)) + // Launch the new chaincode if not already running _, chaincodeInput, launchErr := handler.chaincodeSupport.Launch(context.Background(), transaction) if launchErr != nil { @@ -1493,7 +1525,7 @@ func (handler *Handler) sendExecuteMessage(ctxt context.Context, msg *pb.Chainco } //if security is disabled the context elements will just be nil - if err := handler.setChaincodeSecurityContext(tx, msg); err != nil { + if err := handler.setChaincodeSecurityContext(tx, nil, msg); err != nil { return nil, err } diff --git a/core/chaincode/platforms/car/package.go b/core/chaincode/platforms/car/package.go index 017c7816308..343610eca82 100644 --- a/core/chaincode/platforms/car/package.go +++ 
b/core/chaincode/platforms/car/package.go @@ -28,7 +28,6 @@ import ( cutil "github.com/hyperledger/fabric/core/container/util" pb "github.com/hyperledger/fabric/protos" - "github.com/spf13/viper" ) func download(path string) (string, error) { @@ -78,7 +77,7 @@ func (carPlatform *Platform) WritePackage(spec *pb.ChaincodeSpec, tw *tar.Writer var buf []string //let the executable's name be chaincode ID's name - buf = append(buf, viper.GetString("chaincode.car.Dockerfile")) + buf = append(buf, cutil.GetDockerfileFromConfig("chaincode.car.Dockerfile")) buf = append(buf, "COPY package.car /tmp/package.car") buf = append(buf, fmt.Sprintf("RUN chaintool buildcar /tmp/package.car -o $GOPATH/bin/%s && rm /tmp/package.car", spec.ChaincodeID.Name)) diff --git a/core/chaincode/platforms/car/test/car_test.go b/core/chaincode/platforms/car/test/car_test.go index f480acd8f4c..b761b4581a3 100644 --- a/core/chaincode/platforms/car/test/car_test.go +++ b/core/chaincode/platforms/car/test/car_test.go @@ -32,9 +32,10 @@ func TestMain(m *testing.M) { } func TestCar_BuildImage(t *testing.T) { + vm, err := container.NewVM() // skipped until chaintool accepts ChaincodeStubInterface updates t.SkipNow() - vm, err := container.NewVM() + if err != nil { t.Fail() t.Logf("Error getting VM: %s", err) diff --git a/core/chaincode/platforms/golang/package.go b/core/chaincode/platforms/golang/package.go index 3754d288f35..69a663e1de0 100644 --- a/core/chaincode/platforms/golang/package.go +++ b/core/chaincode/platforms/golang/package.go @@ -69,7 +69,7 @@ func writeChaincodePackage(spec *pb.ChaincodeSpec, tw *tar.Writer) error { newRunLine = fmt.Sprintf("%s\nCOPY src/certs/cert.pem %s", newRunLine, viper.GetString("peer.tls.cert.file")) } - dockerFileContents := fmt.Sprintf("%s\n%s", viper.GetString("chaincode.golang.Dockerfile"), newRunLine) + dockerFileContents := fmt.Sprintf("%s\n%s", cutil.GetDockerfileFromConfig("chaincode.golang.Dockerfile"), newRunLine) dockerFileSize := int64(len([]byte(dockerFileContents))) //Make headers identical by using zero time diff --git a/core/chaincode/platforms/java/package.go b/core/chaincode/platforms/java/package.go index 29b73b8f95f..928ee067a64 100644 --- a/core/chaincode/platforms/java/package.go +++ b/core/chaincode/platforms/java/package.go @@ -44,7 +44,7 @@ func writeChaincodePackage(spec *pb.ChaincodeSpec, tw *tar.Writer) error { if viper.GetBool("security.enabled") { //todo } else { - buf = append(buf, viper.GetString("chaincode.java.Dockerfile")) + buf = append(buf, cutil.GetDockerfileFromConfig("chaincode.java.Dockerfile")) buf = append(buf, "COPY src /root") buf = append(buf, "RUN gradle -b build.gradle build") buf = append(buf, "RUN unzip -od /root build/distributions/Chaincode.zip") diff --git a/core/chaincode/shim/chaincode.go b/core/chaincode/shim/chaincode.go index 021bbe30ebe..ffe07960b29 100644 --- a/core/chaincode/shim/chaincode.go +++ b/core/chaincode/shim/chaincode.go @@ -28,9 +28,8 @@ import ( "strconv" "strings" - gp "google/protobuf" - "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/timestamp" "github.com/hyperledger/fabric/core/chaincode/shim/crypto/attr" "github.com/hyperledger/fabric/core/chaincode/shim/crypto/ecdsa" "github.com/hyperledger/fabric/core/comm" @@ -48,7 +47,7 @@ var chaincodeLogger = logging.MustGetLogger("shim") // ChaincodeStub is an object passed to chaincode for shim side handling of // APIs. 
type ChaincodeStub struct { - UUID string + TxID string securityContext *pb.ChaincodeSecurityContext chaincodeEvent *pb.ChaincodeEvent args [][]byte @@ -68,10 +67,7 @@ func Start(cc Chaincode) error { backendFormatter := logging.NewBackendFormatter(backend, format) logging.SetBackend(backendFormatter).SetLevel(logging.Level(shimLoggingLevel), "shim") - viper.SetEnvPrefix("CORE") - viper.AutomaticEnv() - replacer := strings.NewReplacer(".", "_") - viper.SetEnvKeyReplacer(replacer) + SetChaincodeLoggingLevel() flag.StringVar(&peerAddress, "peer.address", "", "peer address") @@ -97,11 +93,39 @@ func Start(cc Chaincode) error { } chaincodename := viper.GetString("chaincode.id.name") + if chaincodename == "" { + return fmt.Errorf("Error chaincode id not provided") + } err = chatWithPeer(chaincodename, stream, cc) return err } +// IsEnabledForLogLevel checks to see if the chaincodeLogger is enabled for a specific logging level +// used primarily for testing +func IsEnabledForLogLevel(logLevel string) bool { + lvl, _ := logging.LogLevel(logLevel) + return chaincodeLogger.IsEnabledFor(lvl) +} + +// SetChaincodeLoggingLevel sets the chaincode logging level to the value +// of CORE_LOGGING_CHAINCODE set from core.yaml by chaincode_support.go +func SetChaincodeLoggingLevel() { + viper.SetEnvPrefix("CORE") + viper.AutomaticEnv() + replacer := strings.NewReplacer(".", "_") + viper.SetEnvKeyReplacer(replacer) + + chaincodeLogLevelString := viper.GetString("logging.chaincode") + chaincodeLogLevel, err := LogLevel(chaincodeLogLevelString) + + if err == nil { + SetLoggingLevel(chaincodeLogLevel) + } else { + chaincodeLogger.Infof("error with chaincode log level: %s level= %s\n", err, chaincodeLogLevelString) + } +} + // StartInProc is an entry point for system chaincodes bootstrap. It is not an // API for chaincodes. func StartInProc(env []string, args []string, cc Chaincode, recv <-chan *pb.ChaincodeMessage, send chan<- *pb.ChaincodeMessage) error { @@ -230,8 +254,8 @@ func chatWithPeer(chaincodename string, stream PeerChaincodeStream, cc Chaincode // -- init stub --- // ChaincodeInvocation functionality -func (stub *ChaincodeStub) init(handler *Handler, uuid string, secContext *pb.ChaincodeSecurityContext) { - stub.UUID = uuid +func (stub *ChaincodeStub) init(handler *Handler, txid string, secContext *pb.ChaincodeSecurityContext) { + stub.TxID = txid stub.securityContext = secContext stub.args = [][]byte{} newCI := pb.ChaincodeInput{} @@ -249,10 +273,14 @@ func InitTestStub(funargs ...string) *ChaincodeStub { allargs := util.ToChaincodeArgs(funargs...) newCI := pb.ChaincodeInput{Args: allargs} pl, _ := proto.Marshal(&newCI) - stub.init(&Handler{}, "TEST-uuid", &pb.ChaincodeSecurityContext{Payload: pl}) + stub.init(&Handler{}, "TEST-txid", &pb.ChaincodeSecurityContext{Payload: pl}) return &stub } +func (stub *ChaincodeStub) GetTxID() string { + return stub.TxID +} + // --------- Security functions ---------- //CHAINCODE SEC INTERFACE FUNCS TOBE IMPLEMENTED BY ANGELO @@ -262,31 +290,31 @@ func InitTestStub(funargs ...string) *ChaincodeStub { // same transaction context; that is, chaincode calling chaincode doesn't // create a new transaction message. 
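Aside: the UUID-to-TxID rename above also adds a GetTxID accessor to the stub. The following is a minimal illustrative sketch, not part of the patch; the helper name and the keys are hypothetical, while GetTxID and PutState are the shim APIs shown in this hunk.

package example

import (
	"fmt"

	"github.com/hyperledger/fabric/core/chaincode/shim"
)

// recordWithTx stores a value and remembers which transaction wrote it,
// using the new GetTxID accessor instead of reading stub.UUID directly.
func recordWithTx(stub shim.ChaincodeStubInterface, key string, value []byte) error {
	txID := stub.GetTxID()
	if err := stub.PutState(key, value); err != nil {
		return fmt.Errorf("PutState failed in tx %s: %s", txID, err)
	}
	return stub.PutState(key+".lastTx", []byte(txID))
}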
func (stub *ChaincodeStub) InvokeChaincode(chaincodeName string, args [][]byte) ([]byte, error) { - return stub.handler.handleInvokeChaincode(chaincodeName, args, stub.UUID) + return stub.handler.handleInvokeChaincode(chaincodeName, args, stub.TxID) } // QueryChaincode locally calls the specified chaincode `Query` using the // same transaction context; that is, chaincode calling chaincode doesn't // create a new transaction message. func (stub *ChaincodeStub) QueryChaincode(chaincodeName string, args [][]byte) ([]byte, error) { - return stub.handler.handleQueryChaincode(chaincodeName, args, stub.UUID) + return stub.handler.handleQueryChaincode(chaincodeName, args, stub.TxID) } // --------- State functions ---------- // GetState returns the byte array value specified by the `key`. func (stub *ChaincodeStub) GetState(key string) ([]byte, error) { - return stub.handler.handleGetState(key, stub.UUID) + return stub.handler.handleGetState(key, stub.TxID) } // PutState writes the specified `value` and `key` into the ledger. func (stub *ChaincodeStub) PutState(key string, value []byte) error { - return stub.handler.handlePutState(key, value, stub.UUID) + return stub.handler.handlePutState(key, value, stub.TxID) } // DelState removes the specified `key` and its value from the ledger. func (stub *ChaincodeStub) DelState(key string) error { - return stub.handler.handleDelState(key, stub.UUID) + return stub.handler.handleDelState(key, stub.TxID) } //ReadCertAttribute is used to read an specific attribute from the transaction certificate, *attributeName* is passed as input parameter to this function. @@ -337,11 +365,11 @@ type StateRangeQueryIterator struct { // between the startKey and endKey, inclusive. The order in which keys are // returned by the iterator is random. func (stub *ChaincodeStub) RangeQueryState(startKey, endKey string) (StateRangeQueryIteratorInterface, error) { - response, err := stub.handler.handleRangeQueryState(startKey, endKey, stub.UUID) + response, err := stub.handler.handleRangeQueryState(startKey, endKey, stub.TxID) if err != nil { return nil, err } - return &StateRangeQueryIterator{stub.handler, stub.UUID, response, 0}, nil + return &StateRangeQueryIterator{stub.handler, stub.TxID, response, 0}, nil } // HasNext returns true if the range query iterator contains additional keys @@ -696,7 +724,7 @@ func (stub *ChaincodeStub) GetPayload() ([]byte, error) { // GetTxTimestamp returns transaction created timestamp, which is currently // taken from the peer receiving the transaction. Note that this timestamp // may not be the same with the other peers' time. -func (stub *ChaincodeStub) GetTxTimestamp() (*gp.Timestamp, error) { +func (stub *ChaincodeStub) GetTxTimestamp() (*timestamp.Timestamp, error) { return stub.securityContext.TxTimestamp, nil } diff --git a/core/chaincode/shim/chaincode.pb.go b/core/chaincode/shim/chaincode.pb.go index 49597a7f865..72c2ff85014 100644 --- a/core/chaincode/shim/chaincode.pb.go +++ b/core/chaincode/shim/chaincode.pb.go @@ -25,6 +25,12 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type ColumnDefinition_Type int32 const ( @@ -59,6 +65,7 @@ var ColumnDefinition_Type_value = map[string]int32{ func (x ColumnDefinition_Type) String() string { return proto.EnumName(ColumnDefinition_Type_name, int32(x)) } +func (ColumnDefinition_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } type ColumnDefinition struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -66,18 +73,20 @@ type ColumnDefinition struct { Key bool `protobuf:"varint,3,opt,name=key" json:"key,omitempty"` } -func (m *ColumnDefinition) Reset() { *m = ColumnDefinition{} } -func (m *ColumnDefinition) String() string { return proto.CompactTextString(m) } -func (*ColumnDefinition) ProtoMessage() {} +func (m *ColumnDefinition) Reset() { *m = ColumnDefinition{} } +func (m *ColumnDefinition) String() string { return proto.CompactTextString(m) } +func (*ColumnDefinition) ProtoMessage() {} +func (*ColumnDefinition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } type Table struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` ColumnDefinitions []*ColumnDefinition `protobuf:"bytes,2,rep,name=columnDefinitions" json:"columnDefinitions,omitempty"` } -func (m *Table) Reset() { *m = Table{} } -func (m *Table) String() string { return proto.CompactTextString(m) } -func (*Table) ProtoMessage() {} +func (m *Table) Reset() { *m = Table{} } +func (m *Table) String() string { return proto.CompactTextString(m) } +func (*Table) ProtoMessage() {} +func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *Table) GetColumnDefinitions() []*ColumnDefinition { if m != nil { @@ -98,9 +107,10 @@ type Column struct { Value isColumn_Value `protobuf_oneof:"value"` } -func (m *Column) Reset() { *m = Column{} } -func (m *Column) String() string { return proto.CompactTextString(m) } -func (*Column) ProtoMessage() {} +func (m *Column) Reset() { *m = Column{} } +func (m *Column) String() string { return proto.CompactTextString(m) } +func (*Column) ProtoMessage() {} +func (*Column) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } type isColumn_Value interface { isColumn_Value() @@ -193,8 +203,8 @@ func (m *Column) GetBool() bool { } // XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Column) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _Column_OneofMarshaler, _Column_OneofUnmarshaler, []interface{}{ +func (*Column) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Column_OneofMarshaler, _Column_OneofUnmarshaler, _Column_OneofSizer, []interface{}{ (*Column_String_)(nil), (*Column_Int32)(nil), (*Column_Int64)(nil), @@ -298,13 +308,48 @@ func _Column_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) } } +func _Column_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Column) + // value + switch x := m.Value.(type) { + case *Column_String_: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.String_))) + n += len(x.String_) + case *Column_Int32: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Int32)) + case *Column_Int64: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Int64)) + case *Column_Uint32: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Uint32)) + case *Column_Uint64: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Uint64)) + case *Column_Bytes: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Bytes))) + n += len(x.Bytes) + case *Column_Bool: + n += proto.SizeVarint(7<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + type Row struct { Columns []*Column `protobuf:"bytes,1,rep,name=columns" json:"columns,omitempty"` } -func (m *Row) Reset() { *m = Row{} } -func (m *Row) String() string { return proto.CompactTextString(m) } -func (*Row) ProtoMessage() {} +func (m *Row) Reset() { *m = Row{} } +func (m *Row) String() string { return proto.CompactTextString(m) } +func (*Row) ProtoMessage() {} +func (*Row) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *Row) GetColumns() []*Column { if m != nil { @@ -314,5 +359,37 @@ func (m *Row) GetColumns() []*Column { } func init() { + proto.RegisterType((*ColumnDefinition)(nil), "shim.ColumnDefinition") + proto.RegisterType((*Table)(nil), "shim.Table") + proto.RegisterType((*Column)(nil), "shim.Column") + proto.RegisterType((*Row)(nil), "shim.Row") proto.RegisterEnum("shim.ColumnDefinition_Type", ColumnDefinition_Type_name, ColumnDefinition_Type_value) } + +func init() { proto.RegisterFile("chaincode.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 351 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x52, 0xcd, 0x6a, 0xea, 0x40, + 0x14, 0x36, 0x66, 0x12, 0xf5, 0x5c, 0xef, 0xbd, 0xe9, 0x50, 0x24, 0xd0, 0x4d, 0xc9, 0xa2, 0xb8, + 0x69, 0x0a, 0x51, 0x7c, 0x00, 0x6b, 0x69, 0x85, 0xa2, 0x30, 0xc6, 0x45, 0x97, 0x89, 0x9d, 0xd6, + 0xd0, 0x38, 0x23, 0x26, 0xb6, 0xf8, 0x70, 0x85, 0x3e, 0x5a, 0xcf, 0xcc, 0x24, 0x50, 0xac, 0xbb, + 0xf3, 0xfd, 0x9d, 0xcc, 0xf9, 0x08, 0xfc, 0x5f, 0xad, 0x93, 0x4c, 0xac, 0xe4, 0x33, 0x0f, 0xb7, + 0x3b, 0x59, 0x4a, 0x4a, 0x8a, 0x75, 0xb6, 0x09, 0xbe, 0x2c, 0xf0, 0x6e, 0x65, 0xbe, 0xdf, 0x88, + 0x09, 0x7f, 0xc9, 0x44, 0x56, 0x66, 0x52, 0x50, 0x0a, 0x44, 0x24, 0x1b, 0xee, 0x5b, 0x97, 0x56, + 0xbf, 0xc3, 0xf4, 0x4c, 0x6f, 0x80, 0x94, 0x87, 
0x2d, 0xf7, 0x9b, 0xc8, 0xfd, 0x8b, 0x2e, 0x42, + 0x95, 0x0e, 0x8f, 0x93, 0x61, 0x8c, 0x16, 0xa6, 0x8d, 0xd4, 0x03, 0xfb, 0x8d, 0x1f, 0x7c, 0x1b, + 0xfd, 0x6d, 0xa6, 0xc6, 0x60, 0x09, 0x44, 0xe9, 0x14, 0xc0, 0x5d, 0xc4, 0x6c, 0x3a, 0xbb, 0xf7, + 0x1a, 0xb4, 0x03, 0xce, 0x74, 0x16, 0x0f, 0x22, 0xcf, 0xaa, 0xc6, 0xd1, 0xd0, 0x6b, 0x2a, 0xc7, + 0xd2, 0xd0, 0x76, 0x3d, 0x23, 0x4f, 0x94, 0x65, 0xfc, 0x14, 0xdf, 0x2d, 0x3c, 0x87, 0xb6, 0x81, + 0x8c, 0xe7, 0xf3, 0x47, 0xcf, 0x0d, 0x12, 0x70, 0xe2, 0x24, 0xcd, 0xf9, 0xc9, 0x67, 0x4f, 0xe0, + 0x6c, 0x75, 0xf4, 0xc8, 0x02, 0x6f, 0xb0, 0xfb, 0x7f, 0xa2, 0xde, 0xe9, 0x1b, 0xd8, 0xef, 0x40, + 0xf0, 0x69, 0x81, 0x6b, 0x7c, 0xd4, 0x07, 0xb7, 0x28, 0x77, 0x99, 0x78, 0x35, 0x9f, 0x79, 0x68, + 0xb0, 0x0a, 0xd3, 0x1e, 0x38, 0x99, 0x28, 0x07, 0x91, 0xae, 0xc8, 0x41, 0xc1, 0xc0, 0x8a, 0x1f, + 0x0d, 0x75, 0x15, 0x76, 0xc5, 0x8f, 0x86, 0x6a, 0xd3, 0xde, 0x04, 0x08, 0x0a, 0x7f, 0xd5, 0x26, + 0x83, 0x6b, 0x05, 0x23, 0x0e, 0x2a, 0xa4, 0x56, 0x30, 0x83, 0xbb, 0xd2, 0x43, 0xc9, 0x0b, 0xdf, + 0x45, 0xa1, 0xab, 0x76, 0x69, 0x48, 0xcf, 0x81, 0xa4, 0x52, 0xe6, 0x7e, 0x4b, 0xb5, 0x8d, 0xb4, + 0x46, 0xe3, 0x16, 0x38, 0xef, 0x49, 0xbe, 0xe7, 0xc1, 0x35, 0xd8, 0x4c, 0x7e, 0xd0, 0x2b, 0x68, + 0x99, 0xdb, 0x0a, 0x7c, 0xbc, 0xaa, 0xa0, 0xfb, 0xb3, 0x02, 0x56, 0x8b, 0xa9, 0xab, 0xff, 0x90, + 0xc1, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0xda, 0xb2, 0xc7, 0x34, 0x02, 0x00, 0x00, +} diff --git a/core/chaincode/shim/interfaces.go b/core/chaincode/shim/interfaces.go index 264fc0dcc59..ac0363483cb 100644 --- a/core/chaincode/shim/interfaces.go +++ b/core/chaincode/shim/interfaces.go @@ -18,8 +18,7 @@ limitations under the License. package shim import ( - gp "google/protobuf" - + "github.com/golang/protobuf/ptypes/timestamp" "github.com/hyperledger/fabric/core/chaincode/shim/crypto/attr" ) @@ -51,6 +50,9 @@ type ChaincodeStubInterface interface { // as parameters GetFunctionAndParameters() (string, []string) + // Get the transaction ID + GetTxID() string + // InvokeChaincode locally calls the specified chaincode `Invoke` using the // same transaction context; that is, chaincode calling chaincode doesn't // create a new transaction message. @@ -156,7 +158,7 @@ type ChaincodeStubInterface interface { // GetTxTimestamp returns transaction created timestamp, which is currently // taken from the peer receiving the transaction. Note that this timestamp // may not be the same with the other peers' time. 
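Because GetTxTimestamp now returns the ptypes timestamp instead of the old google/protobuf type, a small hedged sketch (illustrative only, not from the patch) of converting it for use in chaincode code:

package example

import (
	"time"

	"github.com/golang/protobuf/ptypes/timestamp"
)

// toTime converts the proto timestamp returned by GetTxTimestamp into a
// standard library time.Time, treating a nil timestamp as the zero time.
func toTime(ts *timestamp.Timestamp) time.Time {
	if ts == nil {
		return time.Time{}
	}
	return time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}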
- GetTxTimestamp() (*gp.Timestamp, error) + GetTxTimestamp() (*timestamp.Timestamp, error) // SetEvent saves the event to be sent when a transaction is made part of a block SetEvent(name string, payload []byte) error diff --git a/core/chaincode/shim/mockstub.go b/core/chaincode/shim/mockstub.go index 9afd774aa28..3e4f9d6d582 100644 --- a/core/chaincode/shim/mockstub.go +++ b/core/chaincode/shim/mockstub.go @@ -23,8 +23,7 @@ import ( "errors" "strings" - gp "google/protobuf" - + "github.com/golang/protobuf/ptypes/timestamp" "github.com/hyperledger/fabric/core/chaincode/shim/crypto/attr" "github.com/op/go-logging" ) @@ -55,8 +54,12 @@ type MockStub struct { Invokables map[string]*MockStub // stores a transaction uuid while being Invoked / Deployed - // TODO if a chaincode uses recursion this may need to be a stack of UUIDs or possibly a reference counting map - Uuid string + // TODO if a chaincode uses recursion this may need to be a stack of TxIDs or possibly a reference counting map + TxID string +} + +func (stub *MockStub) GetTxID() string { + return stub.TxID } func (stub *MockStub) GetArgs() [][]byte { @@ -86,13 +89,13 @@ func (stub *MockStub) GetFunctionAndParameters() (function string, params []stri // Used to indicate to a chaincode that it is part of a transaction. // This is important when chaincodes invoke each other. // MockStub doesn't support concurrent transactions at present. -func (stub *MockStub) MockTransactionStart(uuid string) { - stub.Uuid = uuid +func (stub *MockStub) MockTransactionStart(txid string) { + stub.TxID = txid } // End a mocked transaction, clearing the UUID. func (stub *MockStub) MockTransactionEnd(uuid string) { - stub.Uuid = "" + stub.TxID = "" } // Register a peer chaincode with this MockStub @@ -137,7 +140,7 @@ func (stub *MockStub) GetState(key string) ([]byte, error) { // PutState writes the specified `value` and `key` into the ledger. 
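To go with the MockStub rename above, a short test sketch in the style of mockstub_test.go (illustrative only; it assumes the package's NewMockStub constructor and passes a nil chaincode because only the state API is driven directly; the stub name, txid and keys are hypothetical):

package shim

import "testing"

// TestMockStubTxIDSketch exercises the renamed TxID handling on MockStub.
func TestMockStubTxIDSketch(t *testing.T) {
	stub := NewMockStub("sketch", nil) // nil chaincode: the state API is exercised directly

	stub.MockTransactionStart("tx1")
	if err := stub.PutState("k1", []byte("v1")); err != nil {
		t.Fatalf("PutState failed: %s", err)
	}
	if stub.GetTxID() != "tx1" {
		t.Fatalf("expected TxID tx1, got %s", stub.GetTxID())
	}
	stub.MockTransactionEnd("tx1")

	if val, _ := stub.GetState("k1"); string(val) != "v1" {
		t.Fatalf("unexpected value %q", val)
	}
}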
func (stub *MockStub) PutState(key string, value []byte) error { - if stub.Uuid == "" { + if stub.TxID == "" { mockLogger.Error("Cannot PutState without a transactions - call stub.MockTransactionStart()?") return errors.New("Cannot PutState without a transactions - call stub.MockTransactionStart()?") } @@ -261,7 +264,7 @@ func (stub *MockStub) InvokeChaincode(chaincodeName string, args [][]byte) ([]by otherStub := stub.Invokables[chaincodeName] mockLogger.Debug("MockStub", stub.Name, "Invoking peer chaincode", otherStub.Name, args) // function, strings := getFuncArgs(args) - bytes, err := otherStub.MockInvoke(stub.Uuid, args) + bytes, err := otherStub.MockInvoke(stub.TxID, args) mockLogger.Debug("MockStub", stub.Name, "Invoked peer chaincode", otherStub.Name, "got", bytes, err) return bytes, err } @@ -321,7 +324,7 @@ func (stub *MockStub) GetPayload() ([]byte, error) { } // Not implemented -func (stub *MockStub) GetTxTimestamp() (*gp.Timestamp, error) { +func (stub *MockStub) GetTxTimestamp() (*timestamp.Timestamp, error) { return nil, nil } diff --git a/core/chaincode/shim/mockstub_test.go b/core/chaincode/shim/mockstub_test.go index d9ded212691..65f664e9333 100644 --- a/core/chaincode/shim/mockstub_test.go +++ b/core/chaincode/shim/mockstub_test.go @@ -20,6 +20,8 @@ import ( "errors" "fmt" "testing" + + "github.com/spf13/viper" ) func createTable(stub ChaincodeStubInterface) error { @@ -174,3 +176,17 @@ func TestMockTable(t *testing.T) { } ***/ } + +// TestSetChaincodeLoggingLevel uses the utlity function defined in chaincode.go to +// set the chaincodeLogger's logging level +func TestSetChaincodeLoggingLevel(t *testing.T) { + // set log level to a non-default level + testLogLevelString := "debug" + viper.Set("logging.chaincode", testLogLevelString) + + SetChaincodeLoggingLevel() + + if !IsEnabledForLogLevel(testLogLevelString) { + t.FailNow() + } +} diff --git a/core/comm/connection.go b/core/comm/connection.go index 2916474f0b3..dc204434a47 100644 --- a/core/comm/connection.go +++ b/core/comm/connection.go @@ -16,7 +16,7 @@ const defaultTimeout = time.Second * 3 var commLogger = logging.MustGetLogger("comm") // NewClientConnectionWithAddress Returns a new grpc.ClientConn to the given address. -func NewClientConnectionWithAddress(peerAddress string, block bool, tslEnabled bool, creds credentials.TransportAuthenticator) (*grpc.ClientConn, error) { +func NewClientConnectionWithAddress(peerAddress string, block bool, tslEnabled bool, creds credentials.TransportCredentials) (*grpc.ClientConn, error) { var opts []grpc.DialOption if tslEnabled { opts = append(opts, grpc.WithTransportCredentials(creds)) @@ -35,12 +35,12 @@ func NewClientConnectionWithAddress(peerAddress string, block bool, tslEnabled b } // InitTLSForPeer returns TLS credentials for peer -func InitTLSForPeer() credentials.TransportAuthenticator { +func InitTLSForPeer() credentials.TransportCredentials { var sn string if viper.GetString("peer.tls.serverhostoverride") != "" { sn = viper.GetString("peer.tls.serverhostoverride") } - var creds credentials.TransportAuthenticator + var creds credentials.TransportCredentials if viper.GetString("peer.tls.cert.file") != "" { var err error creds, err = credentials.NewClientTLSFromFile(viper.GetString("peer.tls.cert.file"), sn) diff --git a/core/config.go b/core/config.go index 9bf1feb9251..6f45bde4d57 100644 --- a/core/config.go +++ b/core/config.go @@ -17,64 +17,10 @@ limitations under the License. 
package core import ( - "flag" - "fmt" - "runtime" - "strings" - "github.com/op/go-logging" "github.com/spf13/viper" ) -// Config the config wrapper structure -type Config struct { -} - -func init() { - -} - -// SetupTestLogging setup the logging during test execution -func SetupTestLogging() { - level, err := logging.LogLevel(viper.GetString("peer.logging.level")) - if err == nil { - // No error, use the setting - logging.SetLevel(level, "main") - logging.SetLevel(level, "server") - logging.SetLevel(level, "peer") - } else { - log.Warningf("Log level not recognized '%s', defaulting to %s: %s", viper.GetString("peer.logging.level"), logging.ERROR, err) - logging.SetLevel(logging.ERROR, "main") - logging.SetLevel(logging.ERROR, "server") - logging.SetLevel(logging.ERROR, "peer") - } -} - -// SetupTestConfig setup the config during test execution -func SetupTestConfig() { - flag.Parse() - - // Now set the configuration file - viper.SetEnvPrefix("HYPERLEDGER") - viper.AutomaticEnv() - replacer := strings.NewReplacer(".", "_") - viper.SetEnvKeyReplacer(replacer) - viper.SetConfigName("core") // name of config file (without extension) - viper.AddConfigPath("./") // path to look for the config file in - viper.AddConfigPath("./../") // path to look for the config file in - err := viper.ReadInConfig() // Find and read the config file - if err != nil { // Handle errors reading the config file - panic(fmt.Errorf("Fatal error config file: %s \n", err)) - } - - SetupTestLogging() - - // Set the number of maxprocs - var numProcsDesired = viper.GetInt("peer.gomaxprocs") - log.Debugf("setting Number of procs to %d, was %d\n", numProcsDesired, runtime.GOMAXPROCS(2)) - -} - // See fabric/core/peer/config.go for comments on the configuration caching // methodology. diff --git a/core/container/dockercontroller/dockercontroller.go b/core/container/dockercontroller/dockercontroller.go index 529aafe5b25..6c8c7b109a3 100644 --- a/core/container/dockercontroller/dockercontroller.go +++ b/core/container/dockercontroller/dockercontroller.go @@ -207,7 +207,7 @@ func (vm *DockerVM) Stop(ctxt context.Context, ccid ccintf.CCID, timeout uint, d id, _ := vm.GetVMName(ccid) client, err := cutil.NewDockerClient() if err != nil { - dockerLogger.Debugf("start - cannot create client %s", err) + dockerLogger.Debugf("stop - cannot create client %s", err) return err } id = strings.Replace(id, ":", "_", -1) diff --git a/core/container/dockercontroller/dockercontroller_test.go b/core/container/dockercontroller/dockercontroller_test.go index 6e4b5e3f867..a5724d8fd42 100644 --- a/core/container/dockercontroller/dockercontroller_test.go +++ b/core/container/dockercontroller/dockercontroller_test.go @@ -21,9 +21,9 @@ import ( "os" "testing" + "github.com/fsouza/go-dockerclient" "github.com/spf13/viper" - "github.com/fsouza/go-dockerclient" "github.com/hyperledger/fabric/core/config" "github.com/hyperledger/fabric/core/ledger/testutil" ) diff --git a/core/container/util/dockerutil.go b/core/container/util/dockerutil.go index fb650c81bb9..9d113b2b918 100644 --- a/core/container/util/dockerutil.go +++ b/core/container/util/dockerutil.go @@ -17,7 +17,11 @@ limitations under the License. package util import ( + "runtime" + "strings" + "github.com/fsouza/go-dockerclient" + "github.com/hyperledger/fabric/metadata" "github.com/spf13/viper" ) @@ -35,3 +39,30 @@ func NewDockerClient() (client *docker.Client, err error) { } return } + +// Our docker images retrieve $ARCH via "uname -m", which is typically "x86_64" for, well, x86_64. 
+// However, GOARCH uses "amd64". We therefore need to normalize any discrepancies between "uname -m" +// and GOARCH here. +var archRemap = map[string]string{ + "amd64": "x86_64", +} + +func getArch() string { + if remap, ok := archRemap[runtime.GOARCH]; ok { + return remap + } else { + return runtime.GOARCH + } +} + +func parseDockerfileTemplate(template string) string { + r := strings.NewReplacer( + "$(ARCH)", getArch(), + "$(PROJECT_VERSION)", metadata.Version) + + return r.Replace(template) +} + +func GetDockerfileFromConfig(path string) string { + return parseDockerfileTemplate(viper.GetString(path)) +} diff --git a/core/container/util/dockerutil_test.go b/core/container/util/dockerutil_test.go new file mode 100644 index 00000000000..8d909d08cdf --- /dev/null +++ b/core/container/util/dockerutil_test.go @@ -0,0 +1,31 @@ +/* +Copyright London Stock Exchange 2016 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "testing" + + "github.com/hyperledger/fabric/metadata" +) + +func TestUtil_DockerfileTemplateParser(t *testing.T) { + expected := "FROM foo:" + getArch() + "-" + metadata.Version + actual := parseDockerfileTemplate("FROM foo:$(ARCH)-$(PROJECT_VERSION)") + if actual != expected { + t.Errorf("Error parsing Dockerfile Template. Expected \"%s\", got \"%s\"", expected, actual) + } +} diff --git a/core/container/vm.go b/core/container/vm.go index 9075d1c1d2e..351186420e0 100644 --- a/core/container/vm.go +++ b/core/container/vm.go @@ -123,7 +123,7 @@ func (vm *VM) buildChaincodeContainerUsingDockerfilePackageBytes(spec *pb.Chainc } if err := vm.Client.BuildImage(opts); err != nil { vmLogger.Errorf("Failed Chaincode docker build:\n%s\n", outputbuf.String()) - return fmt.Errorf("Error building Chaincode container: %s", err) + return err } return nil } diff --git a/core/crypto/attributes/proto/attributes.pb.go b/core/crypto/attributes/proto/attributes.pb.go index 12388129f69..6a543a82362 100644 --- a/core/crypto/attributes/proto/attributes.pb.go +++ b/core/crypto/attributes/proto/attributes.pb.go @@ -23,27 +23,35 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + // AttributesMetadataEntry is an entry within the metadata that store an attribute name with its respective key. 
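Returning to the GetDockerfileFromConfig helper added in dockerutil.go above: a hedged usage sketch (the config key and template value here are hypothetical examples) of how a Dockerfile template from configuration is expanded with $(ARCH) and $(PROJECT_VERSION):

package example

import (
	"fmt"

	cutil "github.com/hyperledger/fabric/core/container/util"
	"github.com/spf13/viper"
)

// printExpandedDockerfile expands a Dockerfile template the same way the
// chaincode platforms now do via GetDockerfileFromConfig.
func printExpandedDockerfile() {
	// A template like the ones now used in the yaml configuration files.
	viper.Set("chaincode.golang.Dockerfile",
		"FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION)\nCOPY src $GOPATH/src")

	// $(ARCH) resolves to the normalized architecture (e.g. x86_64 on amd64)
	// and $(PROJECT_VERSION) to metadata.Version injected at build time.
	fmt.Println(cutil.GetDockerfileFromConfig("chaincode.golang.Dockerfile"))
}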
type AttributesMetadataEntry struct { - AttributeName string `protobuf:"bytes,1,opt,name=AttributeName" json:"AttributeName,omitempty"` - AttributeKey []byte `protobuf:"bytes,2,opt,name=AttributeKey,proto3" json:"AttributeKey,omitempty"` + AttributeName string `protobuf:"bytes,1,opt,name=AttributeName,json=attributeName" json:"AttributeName,omitempty"` + AttributeKey []byte `protobuf:"bytes,2,opt,name=AttributeKey,json=attributeKey,proto3" json:"AttributeKey,omitempty"` } -func (m *AttributesMetadataEntry) Reset() { *m = AttributesMetadataEntry{} } -func (m *AttributesMetadataEntry) String() string { return proto.CompactTextString(m) } -func (*AttributesMetadataEntry) ProtoMessage() {} +func (m *AttributesMetadataEntry) Reset() { *m = AttributesMetadataEntry{} } +func (m *AttributesMetadataEntry) String() string { return proto.CompactTextString(m) } +func (*AttributesMetadataEntry) ProtoMessage() {} +func (*AttributesMetadataEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // AttributesMetadata holds both the original metadata bytes and the metadata required to access attributes. type AttributesMetadata struct { // Original metadata bytes - Metadata []byte `protobuf:"bytes,1,opt,name=Metadata,proto3" json:"Metadata,omitempty"` + Metadata []byte `protobuf:"bytes,1,opt,name=Metadata,json=metadata,proto3" json:"Metadata,omitempty"` // Entries for each attributes considered. - Entries []*AttributesMetadataEntry `protobuf:"bytes,2,rep,name=Entries" json:"Entries,omitempty"` + Entries []*AttributesMetadataEntry `protobuf:"bytes,2,rep,name=Entries,json=entries" json:"Entries,omitempty"` } -func (m *AttributesMetadata) Reset() { *m = AttributesMetadata{} } -func (m *AttributesMetadata) String() string { return proto.CompactTextString(m) } -func (*AttributesMetadata) ProtoMessage() {} +func (m *AttributesMetadata) Reset() { *m = AttributesMetadata{} } +func (m *AttributesMetadata) String() string { return proto.CompactTextString(m) } +func (*AttributesMetadata) ProtoMessage() {} +func (*AttributesMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *AttributesMetadata) GetEntries() []*AttributesMetadataEntry { if m != nil { @@ -51,3 +59,24 @@ func (m *AttributesMetadata) GetEntries() []*AttributesMetadataEntry { } return nil } + +func init() { + proto.RegisterType((*AttributesMetadataEntry)(nil), "protos.AttributesMetadataEntry") + proto.RegisterType((*AttributesMetadata)(nil), "protos.AttributesMetadata") +} + +func init() { proto.RegisterFile("attributes.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 158 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x2c, 0x29, 0x29, + 0xca, 0x4c, 0x2a, 0x2d, 0x49, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x53, + 0xc5, 0x4a, 0xc9, 0x5c, 0xe2, 0x8e, 0x70, 0x39, 0xdf, 0xd4, 0x92, 0xc4, 0x94, 0xc4, 0x92, 0x44, + 0xd7, 0xbc, 0x92, 0xa2, 0x4a, 0x21, 0x15, 0x2e, 0x5e, 0xb8, 0x94, 0x5f, 0x62, 0x6e, 0xaa, 0x04, + 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x6f, 0x22, 0xb2, 0xa0, 0x90, 0x12, 0x17, 0x0f, 0x5c, 0x95, + 0x77, 0x6a, 0xa5, 0x04, 0x13, 0x50, 0x11, 0x4f, 0x10, 0x4f, 0x22, 0x92, 0x98, 0x52, 0x36, 0x97, + 0x10, 0xa6, 0x25, 0x42, 0x52, 0x5c, 0x1c, 0x30, 0x36, 0xd8, 0x68, 0x9e, 0x20, 0x8e, 0x5c, 0x98, + 0x9c, 0x25, 0x17, 0x3b, 0xc8, 0x11, 0x99, 0xa9, 0xc5, 0x40, 0x03, 0x99, 0x35, 0xb8, 0x8d, 0xe4, + 0x21, 0xee, 0x2e, 0xd6, 0xc3, 0xe1, 0xda, 0x20, 0xf6, 0x54, 0x88, 0xfa, 0x24, 0x88, 0xcf, 0x8c, + 0x01, 0x01, 
0x00, 0x00, 0xff, 0xff, 0xd6, 0x6e, 0x61, 0x35, 0xf4, 0x00, 0x00, 0x00, +} diff --git a/core/crypto/client_tca.go b/core/crypto/client_tca.go index e862616f013..b929d7d32fe 100644 --- a/core/crypto/client_tca.go +++ b/core/crypto/client_tca.go @@ -26,11 +26,11 @@ import ( "errors" "fmt" - "google/protobuf" "math/big" "time" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/timestamp" "github.com/hyperledger/fabric/core/crypto/primitives" "golang.org/x/net/context" ) @@ -575,7 +575,7 @@ func (client *clientImpl) callTCACreateCertificateSet(num int, attributes []stri // Execute the protocol now := time.Now() - timestamp := google_protobuf.Timestamp{Seconds: int64(now.Second()), Nanos: int32(now.Nanosecond())} + timestamp := timestamp.Timestamp{Seconds: int64(now.Second()), Nanos: int32(now.Nanosecond())} req := &membersrvc.TCertCreateSetReq{ Ts: &timestamp, Id: &membersrvc.Identity{Id: client.enrollID}, diff --git a/core/crypto/client_tcert_pool_mt.go b/core/crypto/client_tcert_pool_mt.go index f3fb92c0613..6e60b62d49d 100644 --- a/core/crypto/client_tcert_pool_mt.go +++ b/core/crypto/client_tcert_pool_mt.go @@ -32,10 +32,20 @@ type tCertPoolEntry struct { client *clientImpl } +const maxFillerThreads = 5 + //NewTCertPoolEntry creates a new tcert pool entry func newTCertPoolEntry(client *clientImpl, attributes []string) *tCertPoolEntry { - tCertChannel := make(chan *TCertBlock, client.conf.getTCertBatchSize()*2) - tCertChannelFeedback := make(chan struct{}, client.conf.getTCertBatchSize()*2) + var tCertChannel chan *TCertBlock + var tCertChannelFeedback chan struct{} + + if client.conf.IsMultiChannelEnabled() { + tCertChannel = make(chan *TCertBlock, client.conf.getTCertBatchSize()*maxFillerThreads) + tCertChannelFeedback = make(chan struct{}, maxFillerThreads) + } else { + tCertChannel = make(chan *TCertBlock, client.conf.getTCertBatchSize()*2) + tCertChannelFeedback = make(chan struct{}, client.conf.getTCertBatchSize()*2) + } done := make(chan struct{}, 1) return &tCertPoolEntry{attributes, tCertChannel, tCertChannelFeedback, done, client} } @@ -90,9 +100,11 @@ func (tCertPoolEntry *tCertPoolEntry) GetNextTCert(attributes ...string) (tCertB tCertPoolEntry.client.Error("Failed getting a new TCert. Buffer is empty!") } if tCertBlock != nil { - // Send feedback to the filler - tCertPoolEntry.client.Debug("Send feedback") - tCertPoolEntry.tCertChannelFeedback <- struct{}{} + if !tCertPoolEntry.client.conf.IsMultiChannelEnabled() { + // Send feedback to the filler + tCertPoolEntry.client.Debug("Send feedback") + tCertPoolEntry.tCertChannelFeedback <- struct{}{} + } break } } @@ -175,14 +187,24 @@ func (tCertPoolEntry *tCertPoolEntry) filler() { tCertPoolEntry.client.Debug("Load unused TCerts...done!") if !stop { - ticker := time.NewTicker(1 * time.Second) + fillerThreads := 0 + var ticker *time.Ticker + if tCertPoolEntry.client.conf.IsMultiChannelEnabled() { + ticker = time.NewTicker(200 * time.Millisecond) + } else { + ticker = time.NewTicker(1 * time.Second) + } for { select { case <-tCertPoolEntry.done: stop = true tCertPoolEntry.client.Debug("Done signal.") case <-tCertPoolEntry.tCertChannelFeedback: - tCertPoolEntry.client.Debug("Feedback received. Time to check for tcerts") + if tCertPoolEntry.client.conf.IsMultiChannelEnabled() { + fillerThreads-- + } else { + tCertPoolEntry.client.Debug("Feedback received. Time to check for tcerts") + } case <-ticker.C: tCertPoolEntry.client.Debug("Time elapsed. 
Time to check for tcerts") } @@ -192,25 +214,39 @@ func (tCertPoolEntry *tCertPoolEntry) filler() { break } - if len(tCertPoolEntry.tCertChannel) < tCertPoolEntry.client.conf.getTCertBatchSize() { - tCertPoolEntry.client.Debugf("Refill TCert Pool. Current size [%d].", - len(tCertPoolEntry.tCertChannel), - ) + if tCertPoolEntry.client.conf.IsMultiChannelEnabled() { - var numTCerts = cap(tCertPoolEntry.tCertChannel) - len(tCertPoolEntry.tCertChannel) - if len(tCertPoolEntry.tCertChannel) == 0 { - numTCerts = cap(tCertPoolEntry.tCertChannel) / 10 - if numTCerts < 1 { - numTCerts = 1 - } - } + for len(tCertPoolEntry.tCertChannel) <= tCertPoolEntry.client.conf.getTCertBatchSize()*(maxFillerThreads-fillerThreads-1) { + tCertPoolEntry.client.Debugf("Refill TCert Pool. Current size [%d].", len(tCertPoolEntry.tCertChannel)) + numTCerts := tCertPoolEntry.client.conf.getTCertBatchSize() - tCertPoolEntry.client.Infof("Refilling [%d] TCerts.", numTCerts) + tCertPoolEntry.client.Infof("Refilling [%d] TCerts.", numTCerts) + fillerThreads++ - err := tCertPoolEntry.client.getTCertsFromTCA(calculateAttributesHash(tCertPoolEntry.attributes), tCertPoolEntry.attributes, numTCerts) - if err != nil { - tCertPoolEntry.client.Errorf("Failed getting TCerts from the TCA: [%s]", err) - break + go func() { + err := tCertPoolEntry.client.getTCertsFromTCA(calculateAttributesHash(tCertPoolEntry.attributes), tCertPoolEntry.attributes, numTCerts) + if err != nil { + tCertPoolEntry.client.Errorf("Failed getting TCerts from the TCA: [%s]", err) + } + tCertPoolEntry.tCertChannelFeedback <- struct{}{} + }() + } + } else { + if len(tCertPoolEntry.tCertChannel) < tCertPoolEntry.client.conf.getTCertBatchSize() { + tCertPoolEntry.client.Debugf("Refill TCert Pool. Current size [%d].", len(tCertPoolEntry.tCertChannel)) + var numTCerts = cap(tCertPoolEntry.tCertChannel) - len(tCertPoolEntry.tCertChannel) + if len(tCertPoolEntry.tCertChannel) == 0 { + numTCerts = cap(tCertPoolEntry.tCertChannel) / 10 + if numTCerts < 1 { + numTCerts = 1 + } + } + tCertPoolEntry.client.Infof("Refilling [%d] TCerts.", numTCerts) + + err := tCertPoolEntry.client.getTCertsFromTCA(calculateAttributesHash(tCertPoolEntry.attributes), tCertPoolEntry.attributes, numTCerts) + if err != nil { + tCertPoolEntry.client.Errorf("Failed getting TCerts from the TCA: [%s]", err) + } } } } diff --git a/core/crypto/crypto_test.go b/core/crypto/crypto_test.go index 4b83fd143d5..0584ad17fa0 100644 --- a/core/crypto/crypto_test.go +++ b/core/crypto/crypto_test.go @@ -88,6 +88,13 @@ func TestMain(m *testing.M) { if ret != 0 { os.Exit(ret) } + + // Third scenario (repeat the above) now also with 'security.multithreading.multichannel' enabled. 
+ properties["security.multithreading.multichannel"] = "true" + ret = runTestsOnScenario(m, properties, "Using multithread + multichannel enabled") + if ret != 0 { + os.Exit(ret) + } properties["security.multithreading.enabled"] = "false" //Fourth scenario with security level = 384 @@ -1536,7 +1543,7 @@ func setup() { func initPKI() { ca.CacheConfiguration() // Need cache the configuration first aca = ca.NewACA() - eca = ca.NewECA() + eca = ca.NewECA(aca) tca = ca.NewTCA(eca) tlsca = ca.NewTLSCA(eca) } diff --git a/core/crypto/node_conf.go b/core/crypto/node_conf.go index ccb5dca5e95..a9382282ef0 100644 --- a/core/crypto/node_conf.go +++ b/core/crypto/node_conf.go @@ -62,6 +62,8 @@ type configuration struct { tlsServerName string multiThreading bool + multiChannel bool + tCertBatchSize int } @@ -152,6 +154,12 @@ func (conf *configuration) init() error { conf.multiThreading = viper.GetBool("security.multithreading.enabled") } + // Set multichannel + conf.multiChannel = false + if viper.IsSet("security.multithreading.multichannel") { + conf.multiChannel = viper.GetBool("security.multithreading.multichannel") + } + return nil } @@ -255,6 +263,10 @@ func (conf *configuration) IsMultithreadingEnabled() bool { return conf.multiThreading } +func (conf *configuration) IsMultiChannelEnabled() bool { + return conf.multiChannel +} + func (conf *configuration) getTCAServerName() string { return conf.tlsServerName } diff --git a/core/crypto/node_eca.go b/core/crypto/node_eca.go index 4b16b07cbd6..2c29fdd2d76 100644 --- a/core/crypto/node_eca.go +++ b/core/crypto/node_eca.go @@ -20,7 +20,6 @@ import ( "crypto/ecdsa" "crypto/rand" "crypto/x509" - "google/protobuf" "time" membersrvc "github.com/hyperledger/fabric/membersrvc/protos" @@ -30,6 +29,7 @@ import ( "io/ioutil" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/timestamp" "github.com/hyperledger/fabric/core/crypto/primitives" "github.com/hyperledger/fabric/core/crypto/primitives/ecies" "golang.org/x/net/context" @@ -356,7 +356,7 @@ func (node *nodeImpl) getEnrollmentCertificateFromECA(id, pw string) (interface{ } req := &membersrvc.ECertCreateReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, Id: &membersrvc.Identity{Id: id}, Tok: &membersrvc.Token{Tok: []byte(pw)}, Sign: &membersrvc.PublicKey{Type: membersrvc.CryptoType_ECDSA, Key: signPub}, diff --git a/core/crypto/node_tlsca.go b/core/crypto/node_tlsca.go index b7359d96ecf..647d37681f2 100644 --- a/core/crypto/node_tlsca.go +++ b/core/crypto/node_tlsca.go @@ -23,10 +23,10 @@ import ( "crypto/rand" "crypto/x509" "errors" - "google/protobuf" "time" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/timestamp" "github.com/hyperledger/fabric/core/crypto/primitives" "github.com/hyperledger/fabric/core/util" "golang.org/x/net/context" @@ -135,7 +135,7 @@ func (node *nodeImpl) getTLSCertificateFromTLSCA(id, affiliation string) (interf // Prepare the request pubraw, _ := x509.MarshalPKIXPublicKey(&priv.PublicKey) now := time.Now() - timestamp := google_protobuf.Timestamp{Seconds: int64(now.Second()), Nanos: int32(now.Nanosecond())} + timestamp := timestamp.Timestamp{Seconds: int64(now.Second()), Nanos: int32(now.Nanosecond())} req := &membersrvc.TLSCertCreateReq{ Ts: ×tamp, diff --git a/core/devops.go b/core/devops.go index d4b21498d4c..ab96713681e 100644 --- a/core/devops.go +++ b/core/devops.go @@ -113,7 +113,6 @@ func (*Devops) Build(context context.Context, spec 
*pb.ChaincodeSpec) (*pb.Chain codePackageBytes, err = vm.BuildChaincodeContainer(spec) if err != nil { - err = fmt.Errorf("Error getting chaincode package bytes: %s", err) devopsLogger.Error(fmt.Sprintf("%s", err)) return nil, err } diff --git a/core/endorser/endorser.yaml b/core/endorser/endorser.yaml index 0426f5c86f5..6b21eadfbc6 100644 --- a/core/endorser/endorser.yaml +++ b/core/endorser/endorser.yaml @@ -363,24 +363,31 @@ chaincode: # This is the basis for the Golang Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - from hyperledger/fabric-baseimage - #from utxo:0.1.0 - COPY src $GOPATH/src - WORKDIR $GOPATH + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + COPY src $GOPATH/src + WORKDIR $GOPATH car: # This is the basis for the CAR Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - FROM hyperledger/fabric-baseimage + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. + Dockerfile: | + from hyperledger/fabric-javaenv:$(ARCH)-$(PROJECT_VERSION) # timeout in millisecs for starting up a container and waiting for Register # to come through. 1sec should be plenty for chaincode unit tests - startuptimeout: 1000 + startuptimeout: 300000 #timeout in millisecs for deploying chaincode from a remote repository. - deploytimeout: 60000 + deploytimeout: 30000 #mode - options are "dev", "net" #dev - in dev mode, user runs the chaincode after starting validator from @@ -393,16 +400,18 @@ chaincode: # the image installpath: /opt/gopath/bin/ - #keepalive in seconds. In situations where the communiction goes through a - #proxy that does not support keep-alive, this parameter will maintain connection - #between peer and chaincode. - #A value <= 0 turns keepalive off - keepalive: 1 + # keepalive in seconds. In situations where the communiction goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. + # A value <= 0 turns keepalive off + keepalive: 0 # system chaincodes whitelist. To add system chaincode "myscc" to the # whitelist, add "myscc: enable" to the list system: lccc: enable + escc: enable + vscc: enable ############################################################################### # diff --git a/core/ledger/genesis/genesis_test.yaml b/core/ledger/genesis/genesis_test.yaml index 7a1010a7680..b78bbcc77fd 100644 --- a/core/ledger/genesis/genesis_test.yaml +++ b/core/ledger/genesis/genesis_test.yaml @@ -45,14 +45,6 @@ peer: # networkId: test networkId: dev - Dockerfile: | - from hyperledger/fabric-baseimage - # Copy GOPATH src and install Peer - COPY src $GOPATH/src - RUN mkdir -p /var/hyperledger/db - WORKDIR $GOPATH/src/github.com/hyperledger/fabric/peer/ - RUN CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" go install && cp $GOPATH/src/github.com/hyperledger/fabric/peer/core.yaml $GOPATH/bin - # The Address this Peer will bind to for providing services address: 0.0.0.0:7051 # Whether the Peer should programmatically determine the address to bind to. This case is useful for docker containers. @@ -186,9 +178,9 @@ chaincode: # This is the basis for the Golang Dockerfile. 
Additional commands will be appended depedendent upon the chaincode specification. Dockerfile: | - from hyperledger/fabric-baseimage - COPY src $GOPATH/src - WORKDIR $GOPATH + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + COPY src $GOPATH/src + WORKDIR $GOPATH #timeout for starting up a container and waiting for Register to come through startuptimeout: 20000 diff --git a/core/ledger/statemgmt/raw/state_impl.go b/core/ledger/statemgmt/raw/state_impl.go index 6885ec6f9f8..5229c226ee4 100644 --- a/core/ledger/statemgmt/raw/state_impl.go +++ b/core/ledger/statemgmt/raw/state_impl.go @@ -28,8 +28,8 @@ type StateImpl struct { stateDelta *statemgmt.StateDelta } -// NewRawState constructs new instance of raw state -func NewRawState() *StateImpl { +// NewStateImpl constructs new instance of raw state +func NewStateImpl() *StateImpl { return &StateImpl{} } diff --git a/core/ledger/statemgmt/state/config.go b/core/ledger/statemgmt/state/config.go index ab0a0279037..11fdd06f032 100644 --- a/core/ledger/statemgmt/state/config.go +++ b/core/ledger/statemgmt/state/config.go @@ -25,7 +25,7 @@ import ( var loadConfigOnce sync.Once -var stateImplName string +var stateImplName stateImplType var stateImplConfigs map[string]interface{} var deltaHistorySize int @@ -35,7 +35,7 @@ func initConfig() { func loadConfig() { logger.Info("Loading configurations...") - stateImplName = viper.GetString("ledger.state.dataStructure.name") + stateImplName = stateImplType(viper.GetString("ledger.state.dataStructure.name")) stateImplConfigs = viper.GetStringMap("ledger.state.dataStructure.configs") deltaHistorySize = viper.GetInt("ledger.state.deltaHistorySize") logger.Infof("Configurations loaded. stateImplName=[%s], stateImplConfigs=%s, deltaHistorySize=[%d]", @@ -44,7 +44,7 @@ func loadConfig() { if len(stateImplName) == 0 { stateImplName = defaultStateImpl stateImplConfigs = nil - } else if stateImplName != "buckettree" && stateImplName != "trie" && stateImplName != "raw" { + } else if stateImplName != buckettreeType && stateImplName != trieType && stateImplName != rawType { panic(fmt.Errorf("Error during initialization of state implementation. State data structure '%s' is not valid.", stateImplName)) } diff --git a/core/ledger/statemgmt/state/state.go b/core/ledger/statemgmt/state/state.go index 293a37e5fb4..4ec46b593fd 100644 --- a/core/ledger/statemgmt/state/state.go +++ b/core/ledger/statemgmt/state/state.go @@ -35,6 +35,14 @@ const defaultStateImpl = "buckettree" var stateImpl statemgmt.HashableState +type stateImplType string + +const ( + buckettreeType stateImplType = "buckettree" + trieType stateImplType = "trie" + rawType stateImplType = "raw" +) + // State structure for maintaining world state. // This encapsulates a particular implementation for managing the state persistence // This is not thread safe @@ -53,12 +61,12 @@ func NewState() *State { initConfig() logger.Infof("Initializing state implementation [%s]", stateImplName) switch stateImplName { - case "buckettree": + case buckettreeType: stateImpl = buckettree.NewStateImpl() - case "trie": - stateImpl = trie.NewStateTrie() - case "raw": - stateImpl = raw.NewRawState() + case trieType: + stateImpl = trie.NewStateImpl() + case rawType: + stateImpl = raw.NewStateImpl() default: panic("Should not reach here. Configs should have checked for the stateImplName being a valid names ") } @@ -81,13 +89,13 @@ func (state *State) TxBegin(txID string) { // TxFinish marks the completion of on-going tx. 
If txID is not same as of the on-going tx, this call panics func (state *State) TxFinish(txID string, txSuccessful bool) { - logger.Debugf("txFinish() for txUuid [%s], txSuccessful=[%t]", txID, txSuccessful) + logger.Debugf("txFinish() for txId [%s], txSuccessful=[%t]", txID, txSuccessful) if state.currentTxID != txID { - panic(fmt.Errorf("Different Uuid in tx-begin [%s] and tx-finish [%s]", state.currentTxID, txID)) + panic(fmt.Errorf("Different txId in tx-begin [%s] and tx-finish [%s]", state.currentTxID, txID)) } if txSuccessful { if !state.currentTxStateDelta.IsEmpty() { - logger.Debugf("txFinish() for txUuid [%s] merging state changes", txID) + logger.Debugf("txFinish() for txId [%s] merging state changes", txID) state.stateDelta.ApplyChanges(state.currentTxStateDelta) state.txStateDeltaHash[txID] = state.currentTxStateDelta.ComputeCryptoHash() state.updateStateImpl = true @@ -136,7 +144,7 @@ func (state *State) GetRangeScanIterator(chaincodeID string, startKey string, en stateImplItr), nil } -// Set sets state to given value for chaincodeID and key. Does not immideatly writes to DB +// Set sets state to given value for chaincodeID and key. Does not immediately writes to DB func (state *State) Set(chaincodeID string, key string, value []byte) error { logger.Debugf("set() chaincodeID=[%s], key=[%s], value=[%#v]", chaincodeID, key, value) if !state.txInProgress() { @@ -160,7 +168,7 @@ func (state *State) Set(chaincodeID string, key string, value []byte) error { return nil } -// Delete tracks the deletion of state for chaincodeID and key. Does not immideatly writes to DB +// Delete tracks the deletion of state for chaincodeID and key. Does not immediately writes to DB func (state *State) Delete(chaincodeID string, key string) error { logger.Debugf("delete() chaincodeID=[%s], key=[%s]", chaincodeID, key) if !state.txInProgress() { diff --git a/core/ledger/statemgmt/state_delta.go b/core/ledger/statemgmt/state_delta.go index 0f12d393f9d..dc9a84ea660 100644 --- a/core/ledger/statemgmt/state_delta.go +++ b/core/ledger/statemgmt/state_delta.go @@ -197,7 +197,7 @@ func (chaincodeStateDelta *ChaincodeStateDelta) set(key string, updatedValue, pr func (chaincodeStateDelta *ChaincodeStateDelta) remove(key string, previousValue []byte) { updatedKV, ok := chaincodeStateDelta.UpdatedKVs[key] if ok { - // Key already exists, just set the previous value + // Key already exists, just set the value updatedKV.Value = nil } else { // New key. 
Create a new entry in the map diff --git a/core/ledger/statemgmt/trie/pkg_test.go b/core/ledger/statemgmt/trie/pkg_test.go index 5f29b59b66f..a9763bcc0a6 100644 --- a/core/ledger/statemgmt/trie/pkg_test.go +++ b/core/ledger/statemgmt/trie/pkg_test.go @@ -36,7 +36,7 @@ type stateTrieTestWrapper struct { } func newStateTrieTestWrapper(t *testing.T) *stateTrieTestWrapper { - return &stateTrieTestWrapper{NewStateTrie(), t} + return &stateTrieTestWrapper{NewStateImpl(), t} } func (stateTrieTestWrapper *stateTrieTestWrapper) Get(chaincodeID string, key string) []byte { diff --git a/core/ledger/statemgmt/trie/state_trie.go b/core/ledger/statemgmt/trie/state_trie.go index ad200e8006d..e73d293952b 100644 --- a/core/ledger/statemgmt/trie/state_trie.go +++ b/core/ledger/statemgmt/trie/state_trie.go @@ -37,8 +37,8 @@ type StateTrie struct { recomputeCryptoHash bool } -// NewStateTrie contructs a new empty StateTrie -func NewStateTrie() *StateTrie { +// NewStateImpl contructs a new empty StateTrie +func NewStateImpl() *StateTrie { return &StateTrie{} } diff --git a/core/ledger/statemgmt/trie/state_trie_test.go b/core/ledger/statemgmt/trie/state_trie_test.go index a9c2f21102f..3be8b39d6c9 100644 --- a/core/ledger/statemgmt/trie/state_trie_test.go +++ b/core/ledger/statemgmt/trie/state_trie_test.go @@ -25,7 +25,7 @@ import ( func TestStateTrie_ComputeHash_AllInMemory_NoContents(t *testing.T) { testDBWrapper.CleanDB(t) - stateTrie := NewStateTrie() + stateTrie := NewStateImpl() stateTrieTestWrapper := &stateTrieTestWrapper{stateTrie, t} hash := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(statemgmt.NewStateDelta()) testutil.AssertEquals(t, hash, nil) @@ -33,7 +33,7 @@ func TestStateTrie_ComputeHash_AllInMemory_NoContents(t *testing.T) { func TestStateTrie_ComputeHash_AllInMemory(t *testing.T) { testDBWrapper.CleanDB(t) - stateTrie := NewStateTrie() + stateTrie := NewStateImpl() stateTrieTestWrapper := &stateTrieTestWrapper{stateTrie, t} stateDelta := statemgmt.NewStateDelta() @@ -76,7 +76,7 @@ func TestStateTrie_ComputeHash_AllInMemory(t *testing.T) { func TestStateTrie_GetSet_WithDB(t *testing.T) { testDBWrapper.CleanDB(t) - stateTrie := NewStateTrie() + stateTrie := NewStateImpl() stateTrieTestWrapper := &stateTrieTestWrapper{stateTrie, t} stateDelta := statemgmt.NewStateDelta() stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil) @@ -100,7 +100,7 @@ func TestStateTrie_GetSet_WithDB(t *testing.T) { func TestStateTrie_ComputeHash_WithDB_Spread_Keys(t *testing.T) { testDBWrapper.CleanDB(t) - stateTrie := NewStateTrie() + stateTrie := NewStateImpl() stateTrieTestWrapper := &stateTrieTestWrapper{stateTrie, t} // Add a few keys and write to DB @@ -180,7 +180,7 @@ func TestStateTrie_ComputeHash_WithDB_Spread_Keys(t *testing.T) { func TestStateTrie_ComputeHash_WithDB_Staggered_Keys(t *testing.T) { testDBWrapper.CleanDB(t) - stateTrie := NewStateTrie() + stateTrie := NewStateImpl() stateTrieTestWrapper := &stateTrieTestWrapper{stateTrie, t} ///////////////////////////////////////////////////////// diff --git a/core/peer/handler.go b/core/peer/handler.go index 52d4a558177..29b3049f8e4 100644 --- a/core/peer/handler.go +++ b/core/peer/handler.go @@ -29,6 +29,8 @@ import ( pb "github.com/hyperledger/fabric/protos" ) +const DefaultSyncSnapshotTimeout time.Duration = 60 * time.Second + // Handler peer handler implementation. 
type Handler struct { chatMutex sync.Mutex @@ -43,11 +45,13 @@ type Handler struct { snapshotRequestHandler *syncStateSnapshotRequestHandler syncStateDeltasRequestHandler *syncStateDeltasHandler syncBlocksRequestHandler *syncBlocksRequestHandler + syncSnapshotTimeout time.Duration + lastIgnoredSnapshotCID *uint64 } // NewPeerHandler returns a new Peer handler // Is instance of HandlerFactory -func NewPeerHandler(coord MessageHandlerCoordinator, stream ChatStream, initiatedStream bool, nextHandler MessageHandler) (MessageHandler, error) { +func NewPeerHandler(coord MessageHandlerCoordinator, stream ChatStream, initiatedStream bool) (MessageHandler, error) { d := &Handler{ ChatStream: stream, @@ -56,6 +60,12 @@ func NewPeerHandler(coord MessageHandlerCoordinator, stream ChatStream, initiate } d.doneChan = make(chan struct{}) + if dur := viper.GetDuration("peer.sync.state.snapshot.writeTimeout"); dur == 0 { + d.syncSnapshotTimeout = DefaultSyncSnapshotTimeout + } else { + d.syncSnapshotTimeout = dur + } + d.snapshotRequestHandler = newSyncStateSnapshotRequestHandler() d.syncStateDeltasRequestHandler = newSyncStateDeltasHandler() d.syncBlocksRequestHandler = newSyncBlocksRequestHandler() @@ -494,22 +504,26 @@ func (d *Handler) beforeSyncStateSnapshot(e *fsm.Event) { peerLogger.Errorf("Error sending syncStateSnapshot to channel: %v", x) } }() - // Use non-blocking send, will WARN and close channel if missed message. + // Use blocking send and timeout, will WARN and close channel if write times out d.snapshotRequestHandler.Lock() defer d.snapshotRequestHandler.Unlock() + timer := time.NewTimer(d.syncSnapshotTimeout) // Make sure the correlationID matches if d.snapshotRequestHandler.shouldHandle(syncStateSnapshot.Request.CorrelationId) { select { case d.snapshotRequestHandler.channel <- syncStateSnapshot: - default: + case <-timer.C: // Was not able to write to the channel, in which case the Snapshot stream is incomplete, and must be discarded, closing the channel // without sending the terminating message which would have had an empty byte slice. 
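The snapshot handler change above replaces the non-blocking channel send with a blocking send bounded by a timer; a compact, self-contained sketch of that pattern (the message type, channel and timeout here are hypothetical, not the patch's types):

package example

import "time"

// sendWithTimeout tries to hand a message to a consumer, giving up after the
// given timeout so an unresponsive reader cannot block the caller forever.
func sendWithTimeout(ch chan<- string, msg string, timeout time.Duration) bool {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case ch <- msg:
		return true // delivered in time
	case <-timer.C:
		return false // timed out; the caller discards the stream and resets
	}
}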
- peerLogger.Warningf("Did NOT send SyncStateSnapshot message to channel for correlationId = %d, sequence = %d, closing channel as the message has been discarded", syncStateSnapshot.Request.CorrelationId, syncStateSnapshot.Sequence) + peerLogger.Warningf("Did NOT send SyncStateSnapshot message to channel for correlationId = %d, sequence = %d because we timed out reading, closing channel as the message has been discarded", syncStateSnapshot.Request.CorrelationId, syncStateSnapshot.Sequence) d.snapshotRequestHandler.reset() } } else { - //Ignore the message, does not match the current correlationId - peerLogger.Warningf("Ignoring SyncStateSnapshot message with correlationId = %d, sequence = %d, as current correlationId = %d", syncStateSnapshot.Request.CorrelationId, syncStateSnapshot.Sequence, d.snapshotRequestHandler.correlationID) + if d.lastIgnoredSnapshotCID == nil || *d.lastIgnoredSnapshotCID < syncStateSnapshot.Request.CorrelationId { + peerLogger.Warningf("Ignoring SyncStateSnapshot message with correlationId = %d, sequence = %d, as current correlationId = %d, future messages for this (and older ids) will be suppressed", syncStateSnapshot.Request.CorrelationId, syncStateSnapshot.Sequence, d.snapshotRequestHandler.correlationID) + d.lastIgnoredSnapshotCID = &syncStateSnapshot.Request.CorrelationId + //Ignore the message, does not match the current correlationId + } } } diff --git a/core/peer/peer.go b/core/peer/peer.go index a8ec366c9cb..3be5fca8249 100644 --- a/core/peer/peer.go +++ b/core/peer/peer.go @@ -177,7 +177,7 @@ type handlerMap struct { } // HandlerFactory for creating new MessageHandlers -type HandlerFactory func(MessageHandlerCoordinator, ChatStream, bool, MessageHandler) (MessageHandler, error) +type HandlerFactory func(MessageHandlerCoordinator, ChatStream, bool) (MessageHandler, error) // EngineFactory for creating new engines type EngineFactory func(MessageHandlerCoordinator) (Engine, error) @@ -597,7 +597,7 @@ func (p *Impl) chatWithPeer(address string) error { func (p *Impl) handleChat(ctx context.Context, stream ChatStream, initiatedStream bool) error { deadline, ok := ctx.Deadline() peerLogger.Debugf("Current context deadline = %s, ok = %v", deadline, ok) - handler, err := p.handlerFactory(p, stream, initiatedStream, nil) + handler, err := p.handlerFactory(p, stream, initiatedStream) if err != nil { return fmt.Errorf("Error creating handler during handleChat initiation: %s", err) } diff --git a/core/peer/statetransfer/statetransfer.go b/core/peer/statetransfer/statetransfer.go index fe77d743606..11bdca46724 100644 --- a/core/peer/statetransfer/statetransfer.go +++ b/core/peer/statetransfer/statetransfer.go @@ -97,7 +97,7 @@ type coordinatorImpl struct { // If the peerIDs are nil, then all peers are assumed to have the given block. 
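The lastIgnoredSnapshotCID field introduced above exists to rate-limit the warning for stale correlation IDs: the warning is logged only the first time a given (or newer) ID is ignored. A stripped-down sketch of that bookkeeping, with hypothetical names:

```go
package main

import "fmt"

// suppressor mimics the lastIgnoredSnapshotCID bookkeeping: a nil pointer
// means nothing has been ignored yet.
type suppressor struct {
	lastIgnored *uint64
}

// warnOnce logs about a stale correlation ID only when it is newer than the
// last ID already reported; repeats for the same or older IDs stay quiet.
func (s *suppressor) warnOnce(cid uint64) {
	if s.lastIgnored == nil || *s.lastIgnored < cid {
		fmt.Printf("ignoring snapshot message with correlationId=%d; further messages for this id will be suppressed\n", cid)
		c := cid
		s.lastIgnored = &c
	}
}

func main() {
	var s suppressor
	s.warnOnce(7) // logged
	s.warnOnce(7) // suppressed
	s.warnOnce(8) // logged again: newer id than the last one ignored
}
```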
// If the call returns an error, a boolean is included which indicates if the error may be transient and the caller should retry func (sts *coordinatorImpl) SyncToTarget(blockNumber uint64, blockHash []byte, peerIDs []*pb.PeerID) (error, bool) { - logger.Debugf("Syncing to target %x for block number %d with peers %v", blockHash, blockNumber, peerIDs) + logger.Infof("Syncing to target %x for block number %d with peers %v", blockHash, blockNumber, peerIDs) if !sts.inProgress { sts.currentStateBlockNumber = sts.stack.GetBlockchainSize() - 1 // The block height is one more than the latest block number @@ -410,7 +410,7 @@ func (sts *coordinatorImpl) syncBlocks(highBlock, lowBlock uint64, highHash []by func (sts *coordinatorImpl) syncBlockchainToTarget(blockSyncReq *blockSyncReq) { - logger.Debugf("Processing a blockSyncReq to block %d", blockSyncReq.blockNumber) + logger.Debugf("Processing a blockSyncReq to block %d through %d", blockSyncReq.blockNumber, blockSyncReq.reportOnBlock) blockchainSize := sts.stack.GetBlockchainSize() @@ -422,8 +422,42 @@ func (sts *coordinatorImpl) syncBlockchainToTarget(blockSyncReq *blockSyncReq) { panic("Our blockchain is already higher than a sync target, this is unlikely, but unimplemented") } } else { + blockCursor := blockSyncReq.blockNumber + validHash := blockSyncReq.firstBlockHash - _, _, err := sts.syncBlocks(blockSyncReq.blockNumber, blockSyncReq.reportOnBlock, blockSyncReq.firstBlockHash, blockSyncReq.peerIDs) + // Don't bother fetching blocks which are already here and valid + // This is especially useful at startup + for { + block, err := sts.stack.GetBlockByNumber(blockCursor) + if err != nil || block == nil { + // Need to fetch this block + break + } + bh, err := sts.stack.HashBlock(block) + if err != nil { + // Something wrong with this block + break + } + if !bytes.Equal(bh, validHash) { + // Block is corrupt + break + } + blockCursor-- + validHash = block.PreviousBlockHash + if blockCursor+1 == blockSyncReq.reportOnBlock { + break + } + } + + if blockCursor+1 <= blockSyncReq.blockNumber { + logger.Debugf("Skipped remote syncing of block %d through %d because they were already present and valid", blockSyncReq.blockNumber, blockCursor+1) + } + + var err error + // Note, this must accomodate blockCursor underflowing + if blockCursor+1 > blockSyncReq.reportOnBlock { + _, _, err = sts.syncBlocks(blockCursor, blockSyncReq.reportOnBlock, validHash, blockSyncReq.peerIDs) + } if nil != blockSyncReq.replyChan { logger.Debugf("Replying to blockSyncReq on reply channel with : %s", err) diff --git a/core/peer/statetransfer/statetransfer_test.go b/core/peer/statetransfer/statetransfer_test.go index 5e72d7b94fc..df8dce75afa 100644 --- a/core/peer/statetransfer/statetransfer_test.go +++ b/core/peer/statetransfer/statetransfer_test.go @@ -164,6 +164,75 @@ func makeSimpleFilter(failureTrigger mockRequest, failureType mockResponse) (fun } +func TestStartupValidStateGenesis(t *testing.T) { + mrls := createRemoteLedgers(2, 1) // No remote targets available + + // Test from blockheight of 1, with valid genesis block + ml := NewMockLedger(mrls, nil, t) + ml.PutBlock(0, SimpleGetBlock(0)) + + sts := newTestStateTransfer(ml, mrls) + defer sts.Stop() + if err := executeStateTransfer(sts, ml, 0, 0, mrls); nil != err { + t.Fatalf("Startup failure: %s", err) + } + +} + +func TestStartupValidStateExisting(t *testing.T) { + mrls := createRemoteLedgers(2, 1) // No remote targets available + + // Test from blockheight of 1, with valid genesis block + ml := NewMockLedger(mrls, 
nil, t) + height := uint64(50) + for i := uint64(0); i < height; i++ { + ml.PutBlock(i, SimpleGetBlock(i)) + } + ml.state = SimpleGetState(height - 1) + + sts := newTestStateTransfer(ml, mrls) + defer sts.Stop() + if err := executeStateTransfer(sts, ml, height-1, height-1, mrls); nil != err { + t.Fatalf("Startup failure: %s", err) + } + +} + +func TestStartupInvalidStateGenesis(t *testing.T) { + mrls := createRemoteLedgers(1, 3) + + // Test from blockheight of 1, with valid genesis block + ml := NewMockLedger(mrls, nil, t) + ml.PutBlock(0, SimpleGetBlock(0)) + ml.state = ^ml.state // Ensure the state is wrong + + sts := newTestStateTransfer(ml, mrls) + defer sts.Stop() + if err := executeStateTransfer(sts, ml, 0, 0, mrls); nil != err { + t.Fatalf("Startup failure: %s", err) + } + +} + +func TestStartupInvalidStateExisting(t *testing.T) { + mrls := createRemoteLedgers(1, 3) + + // Test from blockheight of 1, with valid genesis block + ml := NewMockLedger(mrls, nil, t) + height := uint64(50) + for i := uint64(0); i < height; i++ { + ml.PutBlock(i, SimpleGetBlock(i)) + } + ml.state = ^SimpleGetState(height - 1) // Ensure the state is wrong + + sts := newTestStateTransfer(ml, mrls) + defer sts.Stop() + if err := executeStateTransfer(sts, ml, height-1, height-1, mrls); nil != err { + t.Fatalf("Startup failure: %s", err) + } + +} + func TestCatchupSimple(t *testing.T) { mrls := createRemoteLedgers(1, 3) diff --git a/core/rest/api.go b/core/rest/api.go index fe14a36a842..a0223e9d8a4 100644 --- a/core/rest/api.go +++ b/core/rest/api.go @@ -19,11 +19,11 @@ package rest import ( "errors" "fmt" - "google/protobuf" "golang.org/x/net/context" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" "github.com/hyperledger/fabric/core/ledger" pb "github.com/hyperledger/fabric/protos" "github.com/spf13/viper" @@ -75,7 +75,7 @@ func NewOpenchainServerWithPeerInfo(peerServer PeerInfo) (*ServerOpenchain, erro // GetBlockchainInfo returns information about the blockchain ledger such as // height, current block hash, and previous block hash. -func (s *ServerOpenchain) GetBlockchainInfo(ctx context.Context, e *google_protobuf.Empty) (*pb.BlockchainInfo, error) { +func (s *ServerOpenchain) GetBlockchainInfo(ctx context.Context, e *empty.Empty) (*pb.BlockchainInfo, error) { blockchainInfo, err := s.ledger.GetBlockchainInfo() if blockchainInfo.Height == 0 { return nil, fmt.Errorf("No blocks in blockchain.") @@ -128,7 +128,7 @@ func (s *ServerOpenchain) GetBlockByNumber(ctx context.Context, num *pb.BlockNum // GetBlockCount returns the current number of blocks in the blockchain data // structure. -func (s *ServerOpenchain) GetBlockCount(ctx context.Context, e *google_protobuf.Empty) (*pb.BlockCount, error) { +func (s *ServerOpenchain) GetBlockCount(ctx context.Context, e *empty.Empty) (*pb.BlockCount, error) { // Total number of blocks in the blockchain. size := s.ledger.GetBlockchainSize() @@ -163,12 +163,12 @@ func (s *ServerOpenchain) GetTransactionByID(ctx context.Context, txID string) ( } // GetPeers returns a list of all peer nodes currently connected to the target peer. -func (s *ServerOpenchain) GetPeers(ctx context.Context, e *google_protobuf.Empty) (*pb.PeersMessage, error) { +func (s *ServerOpenchain) GetPeers(ctx context.Context, e *empty.Empty) (*pb.PeersMessage, error) { return s.peerInfo.GetPeers() } // GetPeerEndpoint returns PeerEndpoint info of target peer. 
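The REST API changes that follow replace the old vendored google/protobuf Empty type with the one from github.com/golang/protobuf/ptypes/empty. A minimal, hypothetical service method showing the new request type in isolation (blockCounter merely stands in for the real server type):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes/empty"
	"golang.org/x/net/context"
)

// blockCounter stands in for a service whose handler takes no meaningful
// input; such handlers now accept *empty.Empty from the ptypes/empty package.
type blockCounter struct{ size uint64 }

func (b *blockCounter) GetBlockCount(ctx context.Context, _ *empty.Empty) (uint64, error) {
	return b.size, nil
}

func main() {
	c := &blockCounter{size: 8}
	n, _ := c.GetBlockCount(context.Background(), &empty.Empty{})
	fmt.Println("blocks:", n)
}
```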
-func (s *ServerOpenchain) GetPeerEndpoint(ctx context.Context, e *google_protobuf.Empty) (*pb.PeersMessage, error) { +func (s *ServerOpenchain) GetPeerEndpoint(ctx context.Context, e *empty.Empty) (*pb.PeersMessage, error) { peers := []*pb.PeerEndpoint{} peerEndpoint, err := s.peerInfo.GetPeerEndpoint() if err != nil { diff --git a/core/rest/api_test.go b/core/rest/api_test.go index c94b80b2173..5d304972bfb 100644 --- a/core/rest/api_test.go +++ b/core/rest/api_test.go @@ -19,10 +19,10 @@ package rest import ( "bytes" "fmt" - "google/protobuf" "os" "testing" + "github.com/golang/protobuf/ptypes/empty" "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/core/util" "github.com/hyperledger/fabric/protos" @@ -81,7 +81,7 @@ func TestServerOpenchain_API_GetBlockchainInfo(t *testing.T) { } // Attempt to retrieve the blockchain info. There are no blocks // in this blockchain, therefore this test should intentionally fail. - info, err := server.GetBlockchainInfo(context.Background(), &google_protobuf.Empty{}) + info, err := server.GetBlockchainInfo(context.Background(), &empty.Empty{}) if err != nil { // Success t.Logf("Error retrieving blockchain info: %s", err) @@ -94,7 +94,7 @@ func TestServerOpenchain_API_GetBlockchainInfo(t *testing.T) { // add 3 blocks to ledger. buildTestLedger1(ledger, t) // Attempt to retrieve the blockchain info. - info, err = server.GetBlockchainInfo(context.Background(), &google_protobuf.Empty{}) + info, err = server.GetBlockchainInfo(context.Background(), &empty.Empty{}) if err != nil { t.Logf("Error retrieving blockchain info: %s", err) t.Fail() @@ -105,7 +105,7 @@ func TestServerOpenchain_API_GetBlockchainInfo(t *testing.T) { // add 5 blocks more. buildTestLedger2(ledger, t) // Attempt to retrieve the blockchain info. - info, err = server.GetBlockchainInfo(context.Background(), &google_protobuf.Empty{}) + info, err = server.GetBlockchainInfo(context.Background(), &empty.Empty{}) if err != nil { t.Logf("Error retrieving blockchain info: %s", err) t.Fail() @@ -191,7 +191,7 @@ func TestServerOpenchain_API_GetBlockCount(t *testing.T) { // Retrieve the current number of blocks in the blockchain. There are no blocks // in this blockchain, therefore this test should intentionally fail. - count, err := server.GetBlockCount(context.Background(), &google_protobuf.Empty{}) + count, err := server.GetBlockCount(context.Background(), &empty.Empty{}) if err != nil { // Success t.Logf("Error retrieving BlockCount from blockchain: %s", err) @@ -204,7 +204,7 @@ func TestServerOpenchain_API_GetBlockCount(t *testing.T) { // Add three 3 blocks to ledger. buildTestLedger1(ledger, t) // Retrieve the current number of blocks in the blockchain. Must be 3. - count, err = server.GetBlockCount(context.Background(), &google_protobuf.Empty{}) + count, err = server.GetBlockCount(context.Background(), &empty.Empty{}) if err != nil { t.Logf("Error retrieving BlockCount from blockchain: %s", err) t.Fail() @@ -218,7 +218,7 @@ func TestServerOpenchain_API_GetBlockCount(t *testing.T) { // Add 5 more blocks to ledger. buildTestLedger2(ledger, t) // Retrieve the current number of blocks in the blockchain. Must be 5. 
- count, err = server.GetBlockCount(context.Background(), &google_protobuf.Empty{}) + count, err = server.GetBlockCount(context.Background(), &empty.Empty{}) if err != nil { t.Logf("Error retrieving BlockCount from blockchain: %s", err) t.Fail() diff --git a/core/rest/rest_api.go b/core/rest/rest_api.go index e6836764b6c..f41041b8a1e 100644 --- a/core/rest/rest_api.go +++ b/core/rest/rest_api.go @@ -20,7 +20,6 @@ import ( "encoding/json" "errors" "fmt" - "google/protobuf" "io" "io/ioutil" "net/http" @@ -37,6 +36,7 @@ import ( "github.com/op/go-logging" "github.com/spf13/viper" + "github.com/golang/protobuf/ptypes/empty" core "github.com/hyperledger/fabric/core" "github.com/hyperledger/fabric/core/chaincode" "github.com/hyperledger/fabric/core/comm" @@ -637,7 +637,7 @@ func (s *ServerOpenchainREST) GetTransactionCert(rw web.ResponseWriter, req *web // GetBlockchainInfo returns information about the blockchain ledger such as // height, current block hash, and previous block hash. func (s *ServerOpenchainREST) GetBlockchainInfo(rw web.ResponseWriter, req *web.Request) { - info, err := s.server.GetBlockchainInfo(context.Background(), &google_protobuf.Empty{}) + info, err := s.server.GetBlockchainInfo(context.Background(), &empty.Empty{}) encoder := json.NewEncoder(rw) @@ -1297,10 +1297,10 @@ func (s *ServerOpenchainREST) ProcessChaincode(rw web.ResponseWriter, req *web.R } // Extract the ChaincodeSpec from the params field - deploySpec := requestPayload.Params + ccSpec := requestPayload.Params // Process the chaincode deployment request and record the result - result = s.processChaincodeDeploy(deploySpec) + result = s.processChaincodeDeploy(ccSpec) } else { // @@ -1310,7 +1310,8 @@ func (s *ServerOpenchainREST) ProcessChaincode(rw web.ResponseWriter, req *web.R // Because chaincode invocation/query requests require a ChaincodeInvocationSpec // message instead of a ChaincodeSpec message, we must initialize it here // before proceeding. 
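As the handler comment below notes, invoke and query requests must wrap the supplied ChaincodeSpec in a ChaincodeInvocationSpec before proceeding, whereas deploy uses the spec directly. A small sketch of that wrapping using local stand-in structs; the field names follow the JSON payloads, not the generated protobuf types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-ins for the messages referenced in the REST handler.
type ChaincodeSpec struct {
	Type        int               `json:"type"`
	ChaincodeID map[string]string `json:"chaincodeID"`
	CtorMsg     struct {
		Args []string `json:"args"`
	} `json:"ctorMsg"`
}

type ChaincodeInvocationSpec struct {
	ChaincodeSpec *ChaincodeSpec `json:"chaincodeSpec"`
}

func main() {
	// A deploy request carries the spec as-is; invoke and query wrap the same
	// spec before handing it to the invoke/query path.
	spec := &ChaincodeSpec{Type: 1, ChaincodeID: map[string]string{"name": "mycc"}}
	spec.CtorMsg.Args = []string{"invoke", "a", "b", "10"}

	wrapped := &ChaincodeInvocationSpec{ChaincodeSpec: spec}
	out, _ := json.MarshalIndent(wrapped, "", "  ")
	fmt.Println(string(out))
}
```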
- invokequeryPayload := &pb.ChaincodeInvocationSpec{ChaincodeSpec: requestPayload.Params} + ccSpec := requestPayload.Params + invokequeryPayload := &pb.ChaincodeInvocationSpec{ChaincodeSpec: ccSpec} // Payload params field must contain a ChaincodeSpec message if invokequeryPayload.ChaincodeSpec == nil { @@ -1673,8 +1674,8 @@ func (s *ServerOpenchainREST) processChaincodeInvokeOrQuery(method string, spec // GetPeers returns a list of all peer nodes currently connected to the target peer, including itself func (s *ServerOpenchainREST) GetPeers(rw web.ResponseWriter, req *web.Request) { - peers, err := s.server.GetPeers(context.Background(), &google_protobuf.Empty{}) - currentPeer, err1 := s.server.GetPeerEndpoint(context.Background(), &google_protobuf.Empty{}) + peers, err := s.server.GetPeers(context.Background(), &empty.Empty{}) + currentPeer, err1 := s.server.GetPeerEndpoint(context.Background(), &empty.Empty{}) encoder := json.NewEncoder(rw) @@ -1733,11 +1734,6 @@ func buildOpenchainRESTRouter() *web.Router { router.Get("/chain", (*ServerOpenchainREST).GetBlockchainInfo) router.Get("/chain/blocks/:id", (*ServerOpenchainREST).GetBlockByNumber) - // The /devops endpoint is now considered deprecated and superseded by the /chaincode endpoint - router.Post("/devops/deploy", (*ServerOpenchainREST).Deploy) - router.Post("/devops/invoke", (*ServerOpenchainREST).Invoke) - router.Post("/devops/query", (*ServerOpenchainREST).Query) - // The /chaincode endpoint which superceedes the /devops endpoint from above router.Post("/chaincode", (*ServerOpenchainREST).ProcessChaincode) diff --git a/core/rest/rest_api.json b/core/rest/rest_api.json index d82e763b1a6..75ee953a86f 100644 --- a/core/rest/rest_api.json +++ b/core/rest/rest_api.json @@ -100,105 +100,6 @@ } } }, - "/devops/deploy": { - "post": { - "summary": "[DEPRECATED] Service endpoint for deploying Chaincode [DEPRECATED]", - "description": "The /devops/deploy endpoint receives Chaincode deployment requests. The ChaincodTXand the required entities are first packaged into a container and subsequently deployed to the blockchain. If the Chaincode build and deployment are successful, a confirmation message is returned. Otherwise, an error is displayed alongside with a reason for the failure. This service endpoint is being deprecated, please use the /chaincode endpoint instead.", - "tags": [ - "Chaincode" - ], - "operationId": "chaincodeDeploy", - "parameters": [{ - "name": "ChaincodeSpec", - "in": "body", - "description": "Chaincode specification message", - "required": true, - "schema": { - "$ref": "#/definitions/ChaincodeSpec" - } - }], - "responses": { - "200": { - "description": "Successfully deployed chainCode", - "schema": { - "$ref": "#/definitions/OK" - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/devops/invoke": { - "post": { - "summary": "[DEPRECATED] Service endpoint for invoking Chaincode functions [DEPRECATED]", - "description": "The /devops/invoke endpoint receives requests for invoking functions in deployed Chaincodes. If the Chaincode function is invoked sucessfully, a transaction id is returned. Otherwise, an error is displayed alongside with a reason for the failure. 
This service endpoint is being deprecated, please use the /chaincode endpoint instead.", - "tags": [ - "Chaincode" - ], - "operationId": "chaincodeInvoke", - "parameters": [{ - "name": "ChaincodeInvocationSpec", - "in": "body", - "description": "Chaincode invocation message", - "required": true, - "schema": { - "$ref": "#/definitions/ChaincodeInvocationSpec" - } - }], - "responses": { - "200": { - "description": "Successfully invoked transaction", - "schema": { - "$ref": "#/definitions/OK" - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/devops/query": { - "post": { - "summary": "[DEPRECATED] Service endpoint for querying Chaincode state [DEPRECATED]", - "description": "The /devops/query endpoint receives requests to query Chaincode state. The request triggers a query method on the target Chaincode, both identified in the required payload. If the query method is successful, the response defined within the method is returned. Otherwise, an error is displayed alongside with a reason for the failure. This service endpoint is being deprecated, please use the /chaincode endpoint instead.", - "tags": [ - "Chaincode" - ], - "operationId": "chaincodeQuery", - "parameters": [{ - "name": "ChaincodeInvocationSpec", - "in": "body", - "description": "Chaincode invocation message", - "required": true, - "schema": { - "$ref": "#/definitions/ChaincodeInvocationSpec" - } - }], - "responses": { - "200": { - "description": "Successfully queried chaincode", - "schema": { - "$ref": "#/definitions/OK" - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, "/chaincode": { "post": { "summary": "Service endpoint for Chaincode operations", @@ -550,15 +451,9 @@ "type": "object", "properties": { "type": { - "type": "string", - "default": "GOLANG", - "example": "GOLANG", - "enum":[ - "UNDEFINED", - "GOLANG", - "NODE", - "JAVA" - ], + "type": "integer", + "default": 1, + "example": 1, "description": "Chaincode specification language." 
}, "chaincodeID": { diff --git a/core/rest/rest_api_test.go b/core/rest/rest_api_test.go index c6816f7b68b..be86b7ce16c 100644 --- a/core/rest/rest_api_test.go +++ b/core/rest/rest_api_test.go @@ -18,7 +18,6 @@ package rest import ( "bytes" - "encoding/base64" "encoding/json" "fmt" "io/ioutil" @@ -34,10 +33,10 @@ import ( "github.com/hyperledger/fabric/protos" ) -var failb64 string = base64.StdEncoding.EncodeToString([]byte("fail")) -var initb64 string = base64.StdEncoding.EncodeToString([]byte("Init")) -var change_ownerb64 string = base64.StdEncoding.EncodeToString([]byte("change_owner")) -var get_ownerb64 string = base64.StdEncoding.EncodeToString([]byte("get_owner")) +var fail_func string = "fail" +var init_func string = "Init" +var change_owner_func string = "change_owner" +var get_owner_func string = "get_owner" func performHTTPGet(t *testing.T, url string) []byte { response, err := http.Get(url) @@ -594,7 +593,7 @@ func TestServerOpenchainREST_API_Chaincode_Deploy(t *testing.T) { }, "ctorMsg": { "args": ["` + - initb64 + + init_func + `"] }, "secureContext": "myuser" @@ -621,7 +620,7 @@ func TestServerOpenchainREST_API_Chaincode_Deploy(t *testing.T) { }, "ctorMsg": { "args": ["` + - initb64 + + init_func + `"] } } @@ -644,7 +643,7 @@ func TestServerOpenchainREST_API_Chaincode_Deploy(t *testing.T) { }, "ctorMsg": { "args": ["` + - initb64 + + init_func + `"] }, "secureContext": "myuser" @@ -691,7 +690,7 @@ func TestServerOpenchainREST_API_Chaincode_Invoke(t *testing.T) { performHTTPPost(t, httpServer.URL+"/registrar", []byte(`{"enrollId":"myuser","enrollSecret":"password"}`)) // Test invoke with "fail" function - httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"invoke","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"args":["`+failb64+`"]},"secureContext":"myuser"}}`)) + httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"invoke","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"Function":"`+fail_func+`","args":[]},"secureContext":"myuser"}}`)) if httpResponse.StatusCode != http.StatusOK { t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode) } @@ -701,7 +700,7 @@ func TestServerOpenchainREST_API_Chaincode_Invoke(t *testing.T) { } // Test invoke with "change_owner" function - httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"invoke","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"args":["`+change_ownerb64+`"]},"secureContext":"myuser"}}`)) + httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"invoke","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"Function":"`+change_owner_func+`","args":[]},"secureContext":"myuser"}}`)) if httpResponse.StatusCode != http.StatusOK { t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode) } @@ -742,7 +741,7 @@ func TestServerOpenchainREST_API_Chaincode_Query(t *testing.T) { performHTTPPost(t, httpServer.URL+"/registrar", []byte(`{"enrollId":"myuser","enrollSecret":"password"}`)) // Test query with non-existing chaincode name - httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"query","params":{"type":1,"chaincodeID":{"name":"non-existing"},"ctorMsg":{"args":["`+initb64+`"]},"secureContext":"myuser"}}`)) + 
httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"query","params":{"type":1,"chaincodeID":{"name":"non-existing"},"ctorMsg":{"Function":"`+init_func+`","args":[]},"secureContext":"myuser"}}`)) if httpResponse.StatusCode != http.StatusOK { t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode) } @@ -752,7 +751,7 @@ func TestServerOpenchainREST_API_Chaincode_Query(t *testing.T) { } // Test query with fail function - httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"query","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"args":["`+failb64+`"]},"secureContext":"myuser"}}`)) + httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"query","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"Function":"`+fail_func+`","args":[]},"secureContext":"myuser"}}`)) if httpResponse.StatusCode != http.StatusOK { t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode) } @@ -765,7 +764,7 @@ func TestServerOpenchainREST_API_Chaincode_Query(t *testing.T) { } // Test query with get_owner function - httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"query","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"args":["`+get_ownerb64+`"]},"secureContext":"myuser"}}`)) + httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"query","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"Function":"`+get_owner_func+`","args":[]},"secureContext":"myuser"}}`)) if httpResponse.StatusCode != http.StatusOK { t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode) } diff --git a/core/rest/rest_test.yaml b/core/rest/rest_test.yaml index 8393e7332ff..6746cc73c74 100644 --- a/core/rest/rest_test.yaml +++ b/core/rest/rest_test.yaml @@ -296,17 +296,16 @@ chaincode: # This is the basis for the Golang Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - from hyperledger/fabric-baseimage - #from utxo:0.1.0 - COPY src $GOPATH/src - WORKDIR $GOPATH + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + COPY src $GOPATH/src + WORKDIR $GOPATH car: # This is the basis for the CAR Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - FROM hyperledger/fabric-ccenv + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) # timeout in millisecs for starting up a container and waiting for Register # to come through. 
1sec should be plenty for chaincode unit tests diff --git a/core/util/utils.go b/core/util/utils.go index 9156b9836f0..d8eca4752cc 100644 --- a/core/util/utils.go +++ b/core/util/utils.go @@ -25,8 +25,7 @@ import ( "strings" "time" - gp "google/protobuf" - + "github.com/golang/protobuf/ptypes/timestamp" "golang.org/x/crypto/sha3" ) @@ -78,11 +77,11 @@ func GenerateUUID() string { } // CreateUtcTimestamp returns a google/protobuf/Timestamp in UTC -func CreateUtcTimestamp() *gp.Timestamp { +func CreateUtcTimestamp() *timestamp.Timestamp { now := time.Now().UTC() secs := now.Unix() nanos := int32(now.UnixNano() - (secs * 1000000000)) - return &(gp.Timestamp{Seconds: secs, Nanos: nanos}) + return &(timestamp.Timestamp{Seconds: secs, Nanos: nanos}) } //GenerateHashFromSignature returns a hash of the combined parameters diff --git a/devenv/Vagrantfile b/devenv/Vagrantfile index 8d9d6999daf..54af341894a 100644 --- a/devenv/Vagrantfile +++ b/devenv/Vagrantfile @@ -27,7 +27,7 @@ cd #{SRCMOUNT}/devenv SCRIPT -baseimage_release = File.read '../images/base/release' +baseimage_release = File.read '../.baseimage-release' Vagrant.require_version ">= 1.7.4" Vagrant.configure('2') do |config| diff --git a/devenv/compile_protos.sh b/devenv/compile_protos.sh index ad833ecd8d0..cdee9780e41 100755 --- a/devenv/compile_protos.sh +++ b/devenv/compile_protos.sh @@ -5,7 +5,7 @@ set -eux # Compile protos ALL_PROTO_FILES="$(find . -name "*.proto" -exec readlink -f {} \;)" PROTO_DIRS="$(dirname $ALL_PROTO_FILES | sort | uniq)" -PROTO_DIRS_WITHOUT_JAVA_AND_SDK="$(printf '%s\n' $PROTO_DIRS | grep -v "shim/java" | grep -v "/sdk")" +PROTO_DIRS_WITHOUT_JAVA_AND_SDK="$(printf '%s\n' $PROTO_DIRS | grep -v "shim/java" | grep -v "/sdk" | grep -v "/vendor")" for dir in $PROTO_DIRS_WITHOUT_JAVA_AND_SDK; do cd "$dir" protoc --proto_path="$dir" --go_out=plugins=grpc:. "$dir"/*.proto diff --git a/devenv/failure-motd.in b/devenv/failure-motd.in new file mode 100644 index 00000000000..59afb85f287 --- /dev/null +++ b/devenv/failure-motd.in @@ -0,0 +1,40 @@ +########################################################## + ,.-""``""-., + / ,:,;;,;, \ DANGER DANGER + \ ';';;';' / WILL ROBINSON... + `'---;;---'` + <>_==""==_<> + _<<<<<>>>>>_ + .'____\==/____'. + _____|__ |__| __|______ + /C \\\\\\\\ |..| //////// C\ + \_C//////// |;;| \\\\\\\\C_/ + |____o|##|o____| + \ ___|~~|___ / + '>--------<' + {==_==_==_=} + {= -=_=-_==} + {=_=-}{=-=_} + {=_==}{-=_=} + }~~~~""~~~~{ + jgs }____::____{ + /` || `\ + | || | + | || | + | || | + '-----''-----' +########################################################## + +If you see this notice, it means that something is wrong +with your hyperledger/fabric development environment. + +Typically this indicates that something failed during +provisioning and your environment is incomplete. Builds, +execution, etc., may not operate as they were intended. +Please review the provisioning log and visit: + + https://goo.gl/yqjRC7 + +for more information on troubleshooting and solutions. + +########################################################## diff --git a/devenv/setup.sh b/devenv/setup.sh index cd5fc34d21a..591a01c4830 100755 --- a/devenv/setup.sh +++ b/devenv/setup.sh @@ -37,62 +37,8 @@ DEVENV_REVISION=`(cd /hyperledger/devenv; git rev-parse --short HEAD)` # Install WARNING before we start provisioning so that it # will remain active. 
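For clarity on the core/util/utils.go hunk above: CreateUtcTimestamp now builds its result from the ptypes/timestamp package, splitting the current time into whole seconds and the leftover nanoseconds within that second. A self-contained version of the same construction:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes/timestamp"
)

// utcTimestamp mirrors CreateUtcTimestamp: seconds since the epoch plus the
// remaining nanoseconds inside the current second (always < 1e9, so it fits
// in the int32 Nanos field).
func utcTimestamp() *timestamp.Timestamp {
	now := time.Now().UTC()
	secs := now.Unix()
	nanos := int32(now.UnixNano() - secs*1000000000)
	return &timestamp.Timestamp{Seconds: secs, Nanos: nanos}
}

func main() {
	ts := utcTimestamp()
	fmt.Printf("seconds=%d nanos=%d\n", ts.Seconds, ts.Nanos)
}
```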
We will remove the warning after # success -cat </etc/motd -########################################################## - ,.-""``""-., - / ,:,;;,;, \ DANGER DANGER - \ ';';;';' / WILL ROBINSON... - `'---;;---'` - <>_==""==_<> - _<<<<<>>>>>_ - .'____\==/____'. - _____|__ |__| __|______ - /C \\\\\\\\ |..| //////// C\ - \_C//////// |;;| \\\\\\\\C_/ - |____o|##|o____| - \ ___|~~|___ / - '>--------<' - {==_==_==_=} - {= -=_=-_==} - {=_=-}{=-=_} - {=_==}{-=_=} - }~~~~""~~~~{ - jgs }____::____{ - /` || `\ - | || | - | || | - | || | - '-----''-----' -########################################################## - -If you see this notice, it means that something is wrong -with your hyperledger/fabric development environment. - -Typically this indicates that something failed during -provisioning and your environment is incomplete. Builds, -execution, etc., may not operate as they were intended. -Please review the provisioning log and visit: - - https://goo.gl/yqjRC7 - -for more information on troubleshooting and solutions. - -########################################################## -EOF - - -# Update system -apt-get update -qq - -# Prep apt-get for docker install -apt-get install -y apt-transport-https ca-certificates -apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D - -# Add docker repository -echo deb https://apt.dockerproject.org/repo ubuntu-trusty main > /etc/apt/sources.list.d/docker.list - -# Update system -apt-get update -qq +SCRIPT_DIR="$(readlink -f "$(dirname "$0")")" +cat "$SCRIPT_DIR/failure-motd.in" >> /etc/motd # Storage backend logic case "${DOCKER_STORAGE_BACKEND}" in @@ -113,9 +59,6 @@ case "${DOCKER_STORAGE_BACKEND}" in exit 1;; esac -# Install docker -apt-get install -y linux-image-extra-$(uname -r) apparmor docker-engine - # Configure docker DOCKER_OPTS="-s=${DOCKER_STORAGE_BACKEND_STRING} -r=true --api-cors-header='*' -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock ${DOCKER_OPTS}" sed -i.bak '/^DOCKER_OPTS=/{h;s|=.*|=\"'"${DOCKER_OPTS}"'\"|};${x;/^$/{s||DOCKER_OPTS=\"'"${DOCKER_OPTS}"'\"|;H};x}' /etc/default/docker @@ -126,9 +69,6 @@ usermod -a -G docker vagrant # Add vagrant user to the docker group # Test docker docker run --rm busybox echo All good -# Run our common setup -/hyperledger/scripts/provision/host.sh - # Set Go environment variables needed by other scripts export GOPATH="/opt/gopath" export GOROOT="/opt/go/" @@ -138,9 +78,12 @@ PATH=$GOROOT/bin:$GOPATH/bin:$PATH sudo mkdir -p /var/hyperledger sudo chown -R vagrant:vagrant /var/hyperledger -# Build the actual hyperledger peer (must be done before chown below) +# clean any previous builds as they may have image/.dummy files without +# the backing docker images (since we are, by definition, rebuilding the +# filesystem) and then ensure we have a fresh set of our go-tools. +# NOTE: This must be done before the chown below cd $GOPATH/src/github.com/hyperledger/fabric -make clean peer gotools +make clean gotools # Ensure permissions are set for GOPATH sudo chown -R vagrant:vagrant $GOPATH diff --git a/devenv/setupUbuntuOnPPC64le.sh b/devenv/setupUbuntuOnPPC64le.sh new file mode 100755 index 00000000000..4745c5a0f69 --- /dev/null +++ b/devenv/setupUbuntuOnPPC64le.sh @@ -0,0 +1,89 @@ +#!/bin/bash + +# Development on Power (ppc64le) systems is done outside of vagrant, on the +# native OS. This script helps setup the dev env on ppc64le Ubuntu. 
+# +# See https://github.com/hyperledger/fabric/blob/master/docs/dev-setup/install.md#building-outside-of-vagrant- +# +# NOTE: This script assumes that +# - OS specific apt-sources / repositories are already set appropriately. +# - Host's GOPATH environment variable is set. +# +# To get started on a fresh Ubuntu install: +# mkdir -p $GOPATH/src/github.com/hyperledger +# cd $GOPATH/src/github.com/hyperledger +# git clone http://gerrit.hyperledger.org/r/fabric +# sudo ./fabric/devenv/setupUbuntuOnPPC64el.sh +# cd $GOPATH/src/github.com/hyperledger/fabric +# make dist-clean all + +if [ xroot != x$(whoami) ] +then + echo "You must run as root (Hint: Try prefix 'sudo' while executing the script)" + exit +fi + +if [ ! -d "$GOPATH/src/github.com/hyperledger/fabric" ] +then + echo "Ensure fabric code is under $GOPATH/src/github.com/hyperledger/fabric" + exit +fi + +##################################### +# Install pre-requisite OS packages # +##################################### +apt-get update +apt-get -y install software-properties-common curl git sudo wget + +##################################### +# Install and setup Docker services # +##################################### +# Along with docker.io, aufs-tools also needs to be installed as 'auplink' which is part of aufs-tools package gets invoked during behave tests. +apt-get -y install docker.io aufs-tools + +# Set DOCKER_OPTS and restart Docker daemon. +sed -i '/#DOCKER_OPTS=/a DOCKER_OPTS="-H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock"' /etc/default/docker +systemctl restart docker + +#################################### +# Install Go and set env variable # +#################################### +# Golang binaries for ppc64le are publicly available from Unicamp and is recommended as it includes certain platform specific tuning/optimization. +# Alternativley package part of Ubuntu disto repo can also be used. +wget ftp://ftp.unicamp.br/pub/linuxpatch/toolchain/at/ubuntu/dists/trusty/at9.0/binary-ppc64el/advance-toolchain-at9.0-golang_9.0-3_ppc64el.deb +dpkg -i advance-toolchain-at9.0-golang_9.0-3_ppc64el.deb +rm -f advance-toolchain-at9.0-golang_9.0-3_ppc64el.deb + +# Create links under /usr/bin using update-alternatives +update-alternatives --install /usr/bin/go go /usr/local/go/bin/go 9 +update-alternatives --install /usr/bin/gofmt gofmt /usr/local/go/bin/gofmt 9 + +# Set the GOROOT env variable +export GOROOT="/usr/local/go" + +#################################### +# Build and install RocksDB # +#################################### + +apt-get -y install libsnappy-dev zlib1g-dev libbz2-dev "build-essential" + +cd /tmp +git clone https://github.com/facebook/rocksdb.git +cd rocksdb +git checkout tags/v4.1 +echo There were some bugs in 4.1. This was fixed in dev stream and newer releases like 4.5.1. 
+sed -ibak 's/ifneq ($(MACHINE),ppc64)/ifeq (,$(findstring ppc64,$(MACHINE)))/g' Makefile +PORTABLE=1 make shared_lib +INSTALL_PATH=/usr/local make install-shared +ldconfig +cd - ; rm -rf /tmp/rocksdb + +################################################ +# Install PIP tools, behave and docker-compose # +################################################ + +apt-get -y install python-pip +pip install --upgrade pip +pip install behave nose docker-compose + +pip install -I flask==0.10.1 python-dateutil==2.2 pytz==2014.3 pyyaml==3.10 couchdb==1.0 flask-cors==2.0.1 requests==2.4.3 grpcio==0.13.1 diff --git a/docs/API/AttributesUsage.md b/docs/API/AttributesUsage.md index fa3bd401dcc..b5774c618a8 100644 --- a/docs/API/AttributesUsage.md +++ b/docs/API/AttributesUsage.md @@ -93,6 +93,13 @@ To deploy a chaincode with attributes "company" and "position" it should be writ ``` +Or: + +``` +./peer chaincode deploy -u userName -n mycc -c '{"Args": ["init"]}' -a '["position", "company"]' + +``` + #### REST ``` @@ -145,6 +152,13 @@ To invoke "autorizable counter" with attributes "company" and "position" it shou ``` +Or: + +``` +./peer chaincode invoke -u userName -n mycc -c '{"Args": ["increment"]}' -a '["position", "company"]' + +``` + #### REST ``` @@ -202,6 +216,13 @@ To query "autorizable counter" with attributes "company" and "position" it shoul ``` +Or: + +``` +./peer chaincode query -u userName -n mycc -c '{"Args": ["read"]}' -a '["position", "company"]' + +``` + #### REST ``` diff --git a/docs/API/CoreAPI.md b/docs/API/CoreAPI.md index 90145d93a5f..4b622437003 100644 --- a/docs/API/CoreAPI.md +++ b/docs/API/CoreAPI.md @@ -7,9 +7,9 @@ This document covers the available APIs for interacting with a peer node. Three 1. [CLI](#cli) 2. [REST API](#rest-api) 3. [Node.js Application](#nodejs-application) - * [Using Swagger JS Plugin](#using-swagger-js-plugin) - * [Marbles Demo Application](#marbles-demo-application) - * [Commercial Paper Demo Application](#commercial-paper-demo-application) + * [Using Swagger JS Plugin](#using-swagger-js-plugin) + * [Marbles Demo Application](#marbles-demo-application) + * [Commercial Paper Demo Application](#commercial-paper-demo-application) **Note:** If you are working with APIs with security enabled, please review the [security setup instructions](https://github.com/hyperledger/fabric/blob/master/docs/Setup/Chaincode-setup.md#security-setup-optional) before proceeding. @@ -68,13 +68,28 @@ Command | **stdout** result in the event of success Deploy creates the docker image for the chaincode and subsequently deploys the package to the validating peer. An example is below. -`peer chaincode deploy -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Function":"init", "Args": ["a","100", "b", "200"]}'` +``` +peer chaincode deploy -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Function":"init", "Args": ["a","100", "b", "200"]}' +``` +Or: + +``` +peer chaincode deploy -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Args": ["init", "a","100", "b", "200"]}' +``` The response to the chaincode deploy command will contain the chaincode identifier (hash) which will be required on subsequent `chaincode invoke` and `chaincode query` commands in order to identify the deployed chaincode. 
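Both command forms above encode the same call: the older payload names the function explicitly, while the newer one passes it as the first element of Args. A small illustrative sketch of that convention in Go; this is not the peer's actual parsing code, only a demonstration of how the two shapes normalize to the same function-plus-arguments split:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ctorMsg accepts both payload shapes: {"Function": "...", "Args": [...]}
// and {"Args": ["fn", ...]} where the function name is the first argument.
type ctorMsg struct {
	Function string   `json:"Function"`
	Args     []string `json:"Args"`
}

// split returns the function name and its parameters regardless of which
// shape was supplied.
func (c ctorMsg) split() (string, []string) {
	if c.Function != "" {
		return c.Function, c.Args
	}
	if len(c.Args) == 0 {
		return "", nil
	}
	return c.Args[0], c.Args[1:]
}

func main() {
	for _, raw := range []string{
		`{"Function":"init", "Args": ["a","100", "b", "200"]}`,
		`{"Args": ["init", "a","100", "b", "200"]}`,
	} {
		var c ctorMsg
		if err := json.Unmarshal([]byte(raw), &c); err != nil {
			panic(err)
		}
		fn, args := c.split()
		fmt.Printf("function=%s args=%v\n", fn, args)
	}
}
```

Both inputs print the same function name and argument list, which is why the documentation can present either form interchangeably.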
With security enabled, modify the command to include the -u parameter passing the username of a logged in user as follows: -`peer chaincode deploy -u jim -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Function":"init", "Args": ["a","100", "b", "200"]}'` +``` +peer chaincode deploy -u jim -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Function":"init", "Args": ["a","100", "b", "200"]}' +``` + +Or: + +``` +peer chaincode deploy -u jim -p github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 -c '{"Args": ["init", "a","100", "b", "200"]}' +``` **Note:** If your GOPATH environment variable contains more than one element, the chaincode must be found in the first one or deployment will fail. @@ -157,23 +172,19 @@ You can work with the REST API through any tool of your choice. For example, the To learn about the REST API through Swagger, please take a look at the Swagger document [here](https://github.com/hyperledger/fabric/blob/master/core/rest/rest_api.json). You can upload the service description file to the Swagger service directly or, if you prefer, you can set up Swagger locally by following the instructions [here](#to-set-up-swagger-ui). * [Block](#block) - * GET /chain/blocks/{Block} + * GET /chain/blocks/{Block} * [Blockchain](#blockchain) - * GET /chain -* [Devops](#devops-deprecated) [DEPRECATED] - * POST /devops/deploy - * POST /devops/invoke - * POST /devops/query + * GET /chain * [Chaincode](#chaincode) * POST /chaincode * [Network](#network) - * GET /network/peers + * GET /network/peers * [Registrar](#registrar) - * POST /registrar - * DELETE /registrar/{enrollmentID} - * GET /registrar/{enrollmentID} - * GET /registrar/{enrollmentID}/ecert - * GET /registrar/{enrollmentID}/tcert + * POST /registrar + * DELETE /registrar/{enrollmentID} + * GET /registrar/{enrollmentID} + * GET /registrar/{enrollmentID}/ecert + * GET /registrar/{enrollmentID}/tcert * [Transactions](#transactions) * GET /transactions/{UUID} @@ -207,138 +218,11 @@ message BlockchainInfo { } ``` -#### Devops [DEPRECATED] - -* **POST /devops/deploy** -* **POST /devops/invoke** -* **POST /devops/query** - -**[DEPRECATED] The /devops endpoints have been deprecated and are superseded by the [/chaincode](#chaincode) endpoint. Please use the [/chaincode](#chaincode) endpoint to deploy, invoke, and query a chaincode. [DEPRECATED]** - -Use the Devops APIs to deploy, invoke, and query a chaincode. The required [ChaincodeSpec](https://github.com/hyperledger/fabric/blob/master/protos/chaincode.proto#L60) and [ChaincodeInvocationSpec](https://github.com/hyperledger/fabric/blob/master/protos/chaincode.proto#L89) payloads are defined in [chaincode.proto](https://github.com/hyperledger/fabric/blob/master/protos/chaincode.proto). - -``` -message ChaincodeSpec { - - enum Type { - UNDEFINED = 0; - GOLANG = 1; - NODE = 2; - } - - Type type = 1; - ChaincodeID chaincodeID = 2; - ChaincodeInput ctorMsg = 3; - int32 timeout = 4; - string secureContext = 5; - ConfidentialityLevel confidentialityLevel = 6; -} -``` - -``` -message ChaincodeInvocationSpec { - ChaincodeSpec chaincodeSpec = 1; - //ChaincodeInput message = 2; -} -``` - -**Note:** The deploy transaction requires a 'path' parameter to locate the chaincode source-code in the file system, build it, and deploy it to the validating peers. On the other hand, invoke and query transactions require a 'name' parameter to reference the chaincode that has already been deployed. 
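As a usage note for the endpoints listed above, a client can read the chain height directly from GET /chain. A minimal sketch, assuming a peer REST interface at localhost:7050 and JSON field names matching the BlockchainInfo message; adjust both for your deployment:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// chainInfo holds the fields of interest from GET /chain; the names are
// assumed to match the JSON rendering of BlockchainInfo.
type chainInfo struct {
	Height            uint64 `json:"height"`
	CurrentBlockHash  string `json:"currentBlockHash"`
	PreviousBlockHash string `json:"previousBlockHash"`
}

func main() {
	resp, err := http.Get("http://localhost:7050/chain")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	var info chainInfo
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("height=%d currentBlockHash=%s\n", info.Height, info.CurrentBlockHash)
}
```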
These 'path' and 'name' parameters are specified in the ChaincodeID, defined in [chaincode.proto](https://github.com/hyperledger/fabric/blob/master/protos/chaincode.proto#L41). The only exception to the aforementioned rule is when the peer is running in chaincode development mode (as opposed to production mode), i.e. the user starts the peer with `--peer-chaincodedev` and runs the chaincode manually in a separate terminal window. In that case, the deploy transaction requires a 'name' parameter that is specified by the end user. - -``` -message ChaincodeID { - //deploy transaction will use the path - string path = 1; - - //all other requests will use the name (really a hashcode) generated by - //the deploy transaction - string name = 2; -} -``` - -An example of a valid ChaincodeSpec message for a deployment transaction is shown below. The 'path' parameter specifies the location of the chaincode in the filesystem. Eventually, we imagine that the 'path' will represent a location on GitHub. - -``` -{ - "type": "GOLANG", - "chaincodeID":{ - "path":"github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" - }, - "ctorMsg": { - "function":"init", - "args":["a", "100", "b", "200"] - } - } -``` - -An example of a valid ChaincodeInvocationSpec message for an invocation transaction is shown below. Consult [chaincode.proto](https://github.com/hyperledger/fabric/blob/master/protos/chaincode.proto#L89) for more information. - -``` -{ - "chaincodeSpec":{ - "type": "GOLANG", - "chaincodeID":{ - "name":"mycc" - }, - "ctorMsg":{ - "function":"invoke", - "args":["a", "b", "10"] - } - } -} -``` - -With security enabled, modify each of the above payloads to include the secureContext element, passing the enrollmentID of a logged in user as follows: - -``` -{ - "chaincodeSpec":{ - "type": "GOLANG", - "chaincodeID":{ - "name":"mycc" - }, - "ctorMsg":{ - "function":"invoke", - "args":["a", "b", "10"] - }, - "secureContext": "jim" - } -} -``` - -**Note:** The deployment transaction will take a little time as the docker image is being created. - -The response to a deploy request is either a message containing a confirmation of successful execution or an error, containing a reason for the failure. The response to a successful deployment request also contains the assigned chaincode name (hash), which is to be used in subsequent invocation and query transactions. An example is below: - -``` -{ - "OK": "Successfully deployed chainCode.", - "message": "3940678a8dff854c5ca4365fe0e29771edccb16b2103578c9d9207fea56b10559b43ff5c3025e68917f5a959f2a121d6b19da573016401d9a028b4211e10b20a" -} -``` - -The response to an invoke request is either a message containing a confirmation of successful execution or an error, containing a reason for the failure. The response to an invoke request also contains the transaction identifier (UUID). An example is below: - -``` -{ - "OK": "Successfully invoked chainCode.", - "message": "1ca30d0c-0153-46b4-8826-26dc7cfff852" -} -``` - -The response to a successful query request depends on the chaincode implementation. It may contain a string formatted value of a state variable, any string message, or not have an output. An example is below: - -``` -{ - "OK": "80" -} - -``` - #### Chaincode * **POST /chaincode** -Use the /chaincode endpoint to deploy, invoke, and query a target chaincode. This endpoint supersedes the [/devops](#devops-deprecated) endpoints and should be used for all chaincode operations. 
This service endpoint implements the [JSON RPC 2.0 specification](http://www.jsonrpc.org/specification) with the payload identifying the desired chaincode operation within the `method` field. The supported methods are `deploy`, `invoke`, and `query`. +Use the /chaincode endpoint to deploy, invoke, and query a target chaincode. This service endpoint implements the [JSON RPC 2.0 specification](http://www.jsonrpc.org/specification) with the payload identifying the desired chaincode operation within the `method` field. The supported methods are `deploy`, `invoke`, and `query`. The /chaincode endpoint implements the [JSON RPC 2.0 specification](http://www.jsonrpc.org/specification) and as such, must have the required fields of `jsonrpc`, `method`, and in our case `params` supplied within the payload. The client should also add the `id` element within the payload if they wish to receive a response to the request. If the `id` element is missing from the request payload, the request is assumed to be a notification and the server will not produce a response. @@ -358,8 +242,7 @@ POST host:port/chaincode "path":"github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" }, "ctorMsg": { - "function":"init", - "args":["a", "1000", "b", "2000"] + "args":["init", "a", "1000", "b", "2000"] } }, "id": 1 @@ -382,8 +265,7 @@ POST host:port/chaincode "path":"github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" }, "ctorMsg": { - "function":"init", - "args":["a", "1000", "b", "2000"] + "args":["init", "a", "1000", "b", "2000"] }, "secureContext": "lukas" }, @@ -420,8 +302,7 @@ Chaincode Invocation Request without security enabled: "name":"52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586" }, "ctorMsg": { - "function":"invoke", - "args":["a", "b", "100"] + "args":["invoke", "a", "b", "100"] } }, "id": 3 @@ -442,8 +323,7 @@ Chaincode Invocation Request with security enabled (add `secureContext` element) "name":"52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586" }, "ctorMsg": { - "function":"invoke", - "args":["a", "b", "100"] + "args":["invoke", "a", "b", "100"] }, "secureContext": "lukas" }, @@ -480,8 +360,7 @@ Chaincode Query Request without security enabled: "name":"52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586" }, "ctorMsg": { - "function":"query", - "args":["a"] + "args":["query", "a"] } }, "id": 5 @@ -502,8 +381,7 @@ Chaincode Query Request with security enabled (add `secureContext` element): "name":"52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586" }, "ctorMsg": { - "function":"query", - "args":["a"] + "args":["query", "a"] }, "secureContext": "lukas" }, diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 92102988acd..2bfad6ca9b1 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -10,8 +10,18 @@ before participating. It is important that we keep things civil. ## Getting a Linux Foundation account In order to participate in the development of the Hyperledger Fabric project, -you will need an [LF account](Gerrit/lf-account.md). This will give you single -sign-on to all the community tools, including Gerrit and Jira (coming soon!). +you will need an [LF account](Gerrit/lf-account.md). 
You will need to use +your LF ID to grant you access to all the Hyperledger community tools, including +[Gerrit](https://gerrit.hyperledger.org) and [Jira](https://jira.hyperledger.org). + +### Setting up your SSH key + +For Gerrit, you will want to register your public SSH key. Login to +[Gerrit](https://gerrit.hyperledger.org) +with your LF account, and click on your name in the upper right-hand corner +and then click 'Settings'. In the left-hand margin, you should see a link for +'SSH Public Keys'. Copy-n-paste your [public SSH key](https://help.github.com/articles/generating-an-ssh-key/) +into the window and press 'Add'. ## Getting help diff --git a/docs/Gerrit/gerrit.md b/docs/Gerrit/gerrit.md index 85c9afad423..f1d77386fd6 100644 --- a/docs/Gerrit/gerrit.md +++ b/docs/Gerrit/gerrit.md @@ -75,9 +75,11 @@ git checkout -b issue-nnnn please read [this document](changes.md). ``` -git commit -s +git commit -s -a ``` +Then input precise and readable commit msg and submit. + 6. Any code changes that affect documentation should be accompanied by corresponding changes (or additions) to the documentation and tests. This will ensure that if the merged PR is reversed, all traces of the change will diff --git a/docs/MAINTAINERS.md b/docs/MAINTAINERS.md index 587a3452c22..28bc4ee13df 100644 --- a/docs/MAINTAINERS.md +++ b/docs/MAINTAINERS.md @@ -11,3 +11,5 @@ | Gabor Hosszu | gabre || gabor@digitalasset.com | Simon Schubert | corecode || sis@zurich.ibm.com | Chris Ferris | christo4ferris | ChristopherFerris | chrisfer@us.ibm.com +| Srinivasan Muralidharan | muralisrini | muralisr | muralisr@us.ibm.com +| Gari Singh | mastersingh24 | mastersingh24 | gari.r.singh@gmail.com diff --git a/docs/Setup/Chaincode-setup.md b/docs/Setup/Chaincode-setup.md index 8c4dac74e6b..075aeff2296 100644 --- a/docs/Setup/Chaincode-setup.md +++ b/docs/Setup/Chaincode-setup.md @@ -4,7 +4,7 @@ Chaincode developers need a way to test and debug their chaincode without having The following instructions apply to _developing_ chaincode in Go or Java. They do not apply to running in a production environment. However, if _developing_ chaincode in Java, please see the [Java chaincode setup](https://github.com/hyperledger/fabric/blob/master/docs/Setup/JAVAChaincode.md) instructions first, to be sure your environment is properly configured. -**Note:** We have added support for [System chaincode](https://github.com/hyperledger/fabric/blob/master/docs/SystemChaincodes/noop.md). +**Note:** We have added support for [System chaincode](https://github.com/hyperledger/fabric/blob/master/docs/SystemChaincode-noop.md). ## Choices @@ -344,7 +344,7 @@ POST host:port/chaincode The deploy transaction initializes the chaincode by executing a target initializing function. Though the example shows "init", the name could be arbitrarily chosen by the chaincode developer. You should see the following output in the chaincode window: ``` - 2015/11/15 15:19:31 Received INIT(uuid:005dea42-d57f-4983-803e-3232e551bf61), initializing chaincode + Received INIT(uuid:005dea42-d57f-4983-803e-3232e551bf61), initializing chaincode Aval = 100, Bval = 200 ``` @@ -423,7 +423,7 @@ POST host:port/chaincode The invoking transaction runs the specified chaincode function name "invoke" with the arguments. This transaction transfers 10 units from A to B. You should see the following output in the chaincode window: ``` - 2015/11/15 15:39:11 Received RESPONSE. Payload 200, Uuid 075d72a4-4d1f-4a1d-a735-4f6f60d597a9 + Received RESPONSE. 
Payload 200, Uuid 075d72a4-4d1f-4a1d-a735-4f6f60d597a9 Aval = 90, Bval = 210 ``` diff --git a/docs/Setup/JAVAChaincode.md b/docs/Setup/JAVAChaincode.md index b62857b6140..519420126e7 100644 --- a/docs/Setup/JAVAChaincode.md +++ b/docs/Setup/JAVAChaincode.md @@ -74,15 +74,20 @@ Note: This guide generally assumes you have followed the Chaincode development e make peer peer node start --peer-chaincodedev ``` -3. Open the second Vagrant terminal, and change to Java shim root folder and run gradle build, +3. Open the second Vagrant terminal and build the Java shim layer and publish it to Local Maven Repo +``` +cd $GOPATH/src/github.com/hyperledger/fabric/core/chaincode/shim/java +gradle -b build.gradle clean +gradle -b build.gradle build +``` +4. Change to examples folder to build and run, ``` cd $GOPATH/src/github.com/hyperledger/fabric/examples/chaincode/java/SimpleSample gradle -b build.gradle build ``` -4. Run the SimpleSample chaincode using the `gradle -b build.gradle run` - -5. Open the third Vagrant terminal to run init and invoke on the chaincode +5. Run the SimpleSample chaincode using the `gradle -b build.gradle run` +6. Open the third Vagrant terminal to run init and invoke on the chaincode peer chaincode deploy -l java -n SimpleSample -c '{"Args": ["init", "a","100", "b", "200"]}' ``` diff --git a/docs/Setup/NodeSDK-setup.md b/docs/Setup/NodeSDK-setup.md old mode 100644 new mode 100755 index 57ecbee5f03..db94428933c --- a/docs/Setup/NodeSDK-setup.md +++ b/docs/Setup/NodeSDK-setup.md @@ -18,307 +18,11 @@ execute the following command: npm install -g hfc ``` -## Overview +See [Hyperledger fabric Node.js client SDK](../nodeSDK/node-sdk-guide.md) for more information. -The sections in this document are as follows: -* The [Getting Started](#getting-started) section is intended to help you -quickly get a feel for HFC, how to use it, and some of its common capabilities. -This is demonstrated by example. +## Hyperledger fabric network -* The [Getting Set Up](#getting-set-up) section shows you how to set up up your -environment and how to [run the unit tests](#running-unit-tests). Looking at the -implementation of the unit tests will also help you learn more about the APIs by -example, including asset management and confidentiality. - -## Getting Started -This purpose of this section is to help you quickly get a feel for HFC and how -you may use it. It is not intended to demonstrate all of its power, but to -demonstrate common use cases by example. - -### Some basic terminology -First, there is some basic terminology you should understand. In order to -transact on a hyperledger blockchain, you must first have an identity which has -been both **registered** and **enrolled**. - -Think of **registration** as *issuing a user invitation* to join a blockchain. -It consists of adding a new user name (also called an *enrollment ID*) to the -membership service configuration. This can be done programatically with the -`Member.register` method, or by adding the enrollment ID directly to the -[membersrvc.yaml](https://github.com/hyperledger/fabric/blob/master/membersrvc/membersrvc.yaml) -configuration file. - -Think of **enrollment** as *accepting a user invitation* to join a blockchain. -This is always done by the entity that will transact on the blockchain. This can -be done programatically via the `Member.enroll` method. - -### Learn by example -The best way to quickly learn HFC is by example. - -The following example demonstrates a typical web app. 
The web app authenticates -a user and then transacts on a blockchain on behalf of that user. - -``` -/** - * This example shows how to do the following in a web app. - * 1) At initialization time, enroll the web app with the block chain. - * The identity must have already been registered. - * 2) At run time, after a user has authenticated with the web app: - * a) register and enroll an identity for the user; - * b) use this identity to deploy, query, and invoke a chaincode. - */ - -// To include the package from your hyperledger fabric directory: -// var hfc = require("myFabricDir/sdk/node"); -// To include the package from npm: -// var hfc = require('hfc'); -var hfc = require('hfc'); - -// Create a client chain. -// The name can be anything as it is only used internally. -var chain = hfc.newChain("targetChain"); - -// Configure the KeyValStore which is used to store sensitive keys -// as so it is important to secure this storage. -// The FileKeyValStore is a simple file-based KeyValStore, but you -// can easily implement your own to store whereever you want. -chain.setKeyValStore( hfc.newFileKeyValStore('/tmp/keyValStore') ); - -// Set the URL for member services -chain.setMemberServicesUrl("grpc://localhost:7054"); - -// Add a peer's URL -chain.addPeer("grpc://localhost:7051"); - -// Enroll "WebAppAdmin" which is already registered because it is -// listed in fabric/membersrvc/membersrvc.yaml with its one time password. -// If "WebAppAdmin" has already been registered, this will still succeed -// because it stores the state in the KeyValStore -// (i.e. in '/tmp/keyValStore' in this sample). -chain.enroll("WebAppAdmin", "DJY27pEnl16d", function(err, webAppAdmin) { - if (err) return console.log("ERROR: failed to register %s: %s",err); - // Successfully enrolled WebAppAdmin during initialization. - // Set this user as the chain's registrar which is authorized to register other users. - chain.setRegistrar(webAppAdmin); - // Now begin listening for web app requests - listenForUserRequests(); -}); - -// Main web app function to listen for and handle requests -function listenForUserRequests() { - for (;;) { - // WebApp-specific logic goes here to await the next request. - // ... - // Assume that we received a request from an authenticated user - // 'userName', and determined that we need to invoke the chaincode - // with 'chaincodeID' and function named 'fcn' with arguments 'args'. - handleUserRequest(userName,chaincodeID,fcn,args); - } -} - -// Handle a user request -function handleUserRequest(userName, chaincodeID, fcn, args) { - // Register and enroll this user. - // If this user has already been registered and/or enrolled, this will - // still succeed because the state is kept in the KeyValStore - // (i.e. in '/tmp/keyValStore' in this sample). - var registrationRequest = { - enrollmentID: userName, - // Customize account & affiliation - account: "bank_a", - affiliation: "00001" - }; - chain.registerAndEnroll( registrationRequest, function(err, user) { - if (err) return console.log("ERROR: %s",err); - // Issue an invoke request - var invokeRequest = { - // Name (hash) required for invoke - chaincodeID: chaincodeID, - // Function to trigger - fcn: fcn, - // Parameters for the invoke function - args: args - }; - // Invoke the request from the user object. - var tx = user.invoke(invokeRequest); - // Listen for the 'submitted' event - tx.on('submitted', function(results) { - console.log("submitted invoke: %j",results); - }); - // Listen for the 'complete' event. 
- tx.on('complete', function(results) { - console.log("completed invoke: %j",results; - }); - // Listen for the 'error' event. - tx.on('error', function(err) { - console.log("error on invoke: %j",err); - }); - }); -} -``` - -## Getting Set Up - -First, you'll want to have a running peer node and CA. You can follow the -instructions for setting up a network -[here](https://github.com/hyperledger/fabric/blob/master/docs/Setup/ca-setup.md#Operating-the-CA), -and start a single peer node and CA. - -### Chaincode Deployment Directory Structure - -To have the chaincode deployment succeed in network mode, you must properly set -up the chaincode project outside of your Hyperledger Fabric source tree. The -following instructions will demonstrate how to properly set up the directory -structure to deploy *chaincode_example02* in network mode. - -The chaincode project must be placed under the `$GOPATH/src` directory. For -example, the -[chaincode_example02](https://github.com/hyperledger/fabric/blob/master/examples/chaincode/go/chaincode_example02/chaincode_example02.go) -project should be placed under `$GOPATH/src/` as shown below. - -``` -mkdir -p $GOPATH/src/github.com/chaincode_example02/ -cd $GOPATH/src/github.com/chaincode_example02 -curl GET https://raw.githubusercontent.com/hyperledger/fabric/master/examples/chaincode/go/chaincode_example02/chaincode_example02.go > chaincode_example02.go -``` - -After you have placed your chaincode project under the `$GOPATH/src`, you will need to vendor the dependencies. From the directory containing your chaincode source, run the following commands: - -``` -go get -u github.com/kardianos/govendor -cd $GOPATH/src/github.com/chaincode_example02 -govendor init -govendor fetch github.com/hyperledger/fabric -``` - -Now, execute `go build` to verify that all of the chaincode dependencies are -present. - -``` -go build -``` - -Next, we will switch over to the node sdk directory in the fabric repo to run -the node sdk tests, to make sure you have everything properly set up. Verify -that the -[chain-tests.js](https://github.com/hyperledger/fabric/blob/master/sdk/node/test/unit/chain-tests.js) -unit test file points to the correct chaincode project path. The default -directory is set to `github.com/chaincode_example02/` as shown below. If you -placed the sample chaincode elsewhere, then you will need to change that. - -``` -// Path to the local directory containing the chaincode project under $GOPATH -var testChaincodePath = "github.com/chaincode_example02/"; -``` - -**Note:** You will need to run `npm install` the first time you run the sdk -tests, in order to install all of the dependencies. 
Set the `DEPLOY_MODE` -environment variable to `net` and run the chain-tests as follows: - -``` -cd $GOPATH/src/github.com/hyperledger/fabric/sdk/node -npm install -export DEPLOY_MODE='net' -node test/unit/chain-tests.js | node_modules/.bin/tap-spec -``` - -### Enabling TLS - -If you wish to configure TLS with the Membership Services server, the following -steps are required: - -- Modify `$GOPATH/src/github.com/hyperledger/fabric/membersrvc/membersrvc.yaml` as follows: - -``` -server: - tls: - cert: - file: "/var/hyperledger/production/.membersrvc/tlsca.cert" - key: - file: "/var/hyperledger/production/.membersrvc/tlsca.priv" -``` - -To specify to the Membership Services (TLS) Certificate Authority (TLSCA) what -X.509 v3 Certificate (with a corresponding Private Key) to use: - -- Modify `$GOPATH/src/github.com/hyperledger/fabric/peer/core.yaml` as follows: - -``` -peer: - pki: - tls: - enabled: true - rootcert: - file: "/var/hyperledger/production/.membersrvc/tlsca.cert" -``` - -To configure the peer to connect to the Membership Services server over TLS -(otherwise, the connection will fail). - -- Bootstrap your Membership Services and the peer. This is needed in order to -have the file *tlsca.cert* generated by the member services. - -- Copy `/var/hyperledger/production/.membersrvc/tlsca.cert` to -`$GOPATH/src/github.com/hyperledger/fabric/sdk/node`. - -*Note:* If you cleanup the folder `/var/hyperledger/production` then don't -forget to copy again the *tlsca.cert* file as described above. - -## Running Unit Tests -HFC includes a set of unit tests implemented with the -[tape framework](https://github.com/substack/tape). The -[unit test script](https://github.com/hyperledger/fabric/blob/master/sdk/node/bin/run-unit-tests.sh) -builds and runs both the membership service server and the peer node for you, -therefore you do not have to start those manually. - -### Running the SDK unit tests -HFC includes a set of unit tests implemented with the -[tape framework](https://github.com/substack/tape). To run the unit tests, -execute the following commands: - - cd $GOPATH/src/github.com/hyperledger/fabric - make node-sdk-unit-tests - -The following are brief descriptions of each of the unit tests that are being -run. - -#### registrar -The [registrar.js](https://github.com/hyperledger/fabric/blob/master/sdk/node/test/unit/registrar.js) -test case exercises registering users with Membership Services. It also tests -registering a designated registrar user which can then register additional users. - -#### chain-tests -The [chain-tests.js](https://github.com/hyperledger/fabric/blob/master/sdk/node/test/unit/chain-tests.js) -test case exercises the [chaincode_example02.go](https://github.com/hyperledger/fabric/tree/master/examples/chaincode/go/chaincode_example02) -chaincode when it has been deployed in both development mode and network mode. - -#### asset-mgmt -The [asset-mgmt.js](https://github.com/hyperledger/fabric/blob/master/sdk/node/test/unit/asset-mgmt.js) -test case exercises the [asset_management.go](https://github.com/hyperledger/fabric/tree/master/examples/chaincode/go/asset_management) -chaincode when it has been deployed in both development mode and network mode. 
- -#### asset-mgmt-with-roles -The [asset-mgmt-with-roles.js](https://github.com/hyperledger/fabric/blob/master/sdk/node/test/unit/asset-mgmt-with-roles.js) -test case exercises the [asset_management_with_roles.go](https://github.com/hyperledger/fabric/tree/master/examples/chaincode/go/asset_management_with_roles) -chaincode when it has been deployed in both development mode and network mode. - -#### Troublingshooting -If you see errors stating that the client has already been registered/enrolled, -keep in mind that you can perform the enrollment process only once, as the -enrollmentSecret is a one-time-use password. You will see these errors if you -have performed a user registration/enrollment and subsequently deleted the -cryptographic tokens stored on the client side. The next time you try to enroll, -errors similar to the ones below will be seen. - - ``` - Error: identity or token do not match - ``` - ``` - Error: user is already registered - ``` - -To address this, remove any stored cryptographic material from the CA server by -following the instructions [here](https://github.com/hyperledger/fabric/blob/master/docs/Setup/Chaincode-setup.md#removing-temporary-files-when-security-is-enabled). -You will also need to remove any of the cryptographic tokens stored on the -client side by deleting the KeyValStore directory. That directory is -configurable and is set to `/tmp/keyValStore` within the unit tests. +First, you'll want to have a running peer node and member services. The +instructions for setting up a network are +[here](Network-setup.md). You may also use the [self contained environment](../nodeSDK/node-sdk-self-contained.md) that provides the network. diff --git a/docs/SystemChaincodes/noop.md b/docs/SystemChaincode-noop.md similarity index 100% rename from docs/SystemChaincodes/noop.md rename to docs/SystemChaincode-noop.md diff --git a/docs/abstract_v1.md b/docs/abstract_v1.md new file mode 100644 index 00000000000..ea0dd563d64 --- /dev/null +++ b/docs/abstract_v1.md @@ -0,0 +1,32 @@ +# HYPERLEDGER FABRIC v1.0 + +Hyperledger fabric is a platform that enables the delivery of a secure, robust, permissioned blockchain for the enterprise that incorporates a byzantine fault tolerant consensus. We have learned much as we progressed through the v0.6-preview release. In particular, that in order to provide for the scalability and confidentiality needs of many use cases, a refactoring of the architecture was needed. The v0.6-preview release will be the final (barring any bug fixes) release based upon the original architecture. + +Hyperledger fabric's v1.0 architecture has been designed to address two vital enterprise-grade requirements – **security** and **scalability**. Businesses and organizations can leverage this new architecture to execute confidential transactions on networks with shared or common assets – e.g. supply chain, FOREX market, healthcare, etc. The progression to v1.0 will be incremental, with myriad windows for community members to contribute code and start curating the fabric to fit specific business needs. + +## WHERE WE ARE: + +The current implementation involves every validating peer shouldering the responsibility for the full gauntlet of network functionality. They execute transactions, perform consensus, and maintain the shared ledger. Not only does this configuration lay a huge computational burden on each peer, hindering scalability, but it also constricts important facets of privacy and confidentiality. 
Namely, there is no mechanism to “channel” or “silo” confidential transactions. Every peer can see the logic for every transaction. + +## WHERE WE'RE GOING + +The new architecture introduces a clear functional separation of peer roles, and allows a transaction to pass through the network in a structured and modularized fashion. The peers are diverged into two distinct roles – Endorser & Committer. As an endorser, the peer will simulate the transaction and ensure that the outcome is both deterministic and stable. As a committer, the peer will validate the integrity of a transaction and then append to the ledger. Now confidential transactions can be sent to specific endorsers and their correlating committers, without the network being made cognizant of the transaction. Additionally, policies can be set to determine what levels of “endorsement” and “validation” are acceptable for a specific class of transactions. A failure to meet these thresholds would simply result in a transaction being withdrawn, rather than imploding or stagnating the entire network. This new model also introduces the possibility for more elaborate networks, such as a foreign exchange market. Entities may need to only participate as endorsers for their transactions, while leaving consensus and commitment (i.e. settlement in this scenario) to a trusted third party such as a clearing house. + +The consensus process (i.e. algorithmic computation) is entirely abstracted from the peer. This modularity not only provides a powerful security layer – the consenting nodes are agnostic to the transaction logic – but it also generates a framework where consensus can become pluggable and scalability can truly occur. There is no longer a parallel relationship between the number of peers in a network and the number of consenters. Now networks can grow dynamically (i.e. add endorsers and committers) without having to add corresponding consenters, and exist in a modular infrastructure designed to support high transaction throughput. Moreover, networks now have the capability to completely liberate themselves from the computational and legal burden of consensus by tapping into a pre-existing consensus cloud. + +As v1.0 manifests, we will see the foundation for interoperable blockchain networks that have the ability to scale and transact in a manner adherent with regulatory and industry standards. Watch how fabric v1.0 and the Hyperledger Project are building a true blockchain for business - + +[![HYPERLEDGERv1.0_ANIMATION](http://img.youtube.com/vi/EKa5Gh9whgU/0.jpg)](http://www.youtube.com/watch?v=EKa5Gh9whgU) + +## HOW TO CONTRIBUTE + +Use the following links to explore upcoming additions to fabric's codebase that will spawn the capabilities in v1.0: + +* Familiarize yourself with the [guidelines for code contributions](CONTRIBUTING.md) to this project. **Note**: In order to participate in the development of the Hyperledger fabric project, you will need an [LF account](Gerrit/lf-account.md). This will give you single +sign-on to JIRA and Gerrit. +* Explore the design document for the new [architecture](https://github.com/hyperledger-archives/fabric/wiki/Next-Consensus-Architecture-Proposal) +* Explore [JIRA](https://jira.hyperledger.org/projects/FAB/issues/) for open Hyperledger fabric issues. +* Explore the [JIRA](https://jira.hyperledger.org/projects/FAB/issues/) backlog for upcoming Hyperledger fabric issues. +* Explore [JIRA](https://jira.hyperledger.org/issues/?filter=10147) for Hyperledger fabric issues tagged with "help wanted." 
+* Explore the [source code](https://github.com/hyperledger/fabric) +* Explore the [documentation](http://hyperledger-fabric.readthedocs.io/en/latest/) diff --git a/docs/custom_theme/searchbox.html b/docs/custom_theme/searchbox.html new file mode 100644 index 00000000000..33ed3e2439c --- /dev/null +++ b/docs/custom_theme/searchbox.html @@ -0,0 +1,5 @@ +
diff --git a/docs/dev-setup/build.md b/docs/dev-setup/build.md index 6b1cea40903..642eaf79ec2 100644 --- a/docs/dev-setup/build.md +++ b/docs/dev-setup/build.md @@ -158,6 +158,21 @@ cd $GOPATH/src/github.com/hyperledger/fabric make peer unit-test behave ``` +### Building on Power Platform + +Development and build on Power (ppc64le) systems is done outside of vagrant as outlined [here](#building-outside-of-vagrant-). For ease of setting up the dev environment on Ubuntu, invoke [this script](https://github.com/hyperledger/fabric/tree/master/devenv/setupUbuntuOnPPC64le.sh) as root. This script has been validated on Ubuntu 16.04 and assumes certain things (like, development system has OS repositories in place, firewall setting etc) and in general can be improvised further. + +To get started on Power server installed with Ubuntu, first ensure you have properly setup your Host's [GOPATH environment variable](https://github.com/golang/go/wiki/GOPATH). Then, execute the following commands to build the fabric code: + +``` +mkdir -p $GOPATH/src/github.com/hyperledger +cd $GOPATH/src/github.com/hyperledger +git clone http://gerrit.hyperledger.org/r/fabric +sudo ./fabric/devenv/setupUbuntuOnPPC64le.sh +cd $GOPATH/src/github.com/hyperledger/fabric +make dist-clean all +``` + ### Building natively on OSX First, install Docker, as described [here](https://docs.docker.com/engine/installation/mac/). The database by default writes to /var/hyperledger. You can override this in the `core.yaml` configuration file, under `peer.fileSystemPath`. diff --git a/docs/images/standalone-app-developer.png b/docs/images/standalone-app-developer.png new file mode 100644 index 00000000000..1fb8ac82268 Binary files /dev/null and b/docs/images/standalone-app-developer.png differ diff --git a/docs/images/web-app-developer.png b/docs/images/web-app-developer.png new file mode 100644 index 00000000000..1fb8ac82268 Binary files /dev/null and b/docs/images/web-app-developer.png differ diff --git a/docs/index.md b/docs/index.md index b7edcb9486c..d9f5b96041f 100644 --- a/docs/index.md +++ b/docs/index.md @@ -19,21 +19,25 @@ to host any mainstream language for smart contracts development. ## Releases -The fabric releases are documented -[here](https://github.com/hyperledger/fabric/wiki/Fabric-Releases). We have just -released our first release under the governance of the Hyperledger Project - -v0.5-developer-preview. +The fabric releases are documented [here](releases.md). We have just +released our second release under the governance of the Hyperledger Project - +v0.6-preview. + +## Fabric Starter Kit + +If you'd like to dive right in and get an operational experience on your local +server or laptop to begin development, we have just the thing for you. We have +created a standalone Docker-based [starter kit](starter/fabric-starter-kit.md) +that leverages the latest published Docker images that you can run on your +laptop and be up and running in no time. That should get you going with a +sample application and some simple chaincode. From there, you can go deeper +by exploring our [developer guides](#developer-guides). ## Contributing to the project We welcome contributions to the Hyperledger Project in many forms. There's always plenty to do! Full details of how to contribute to this project are -documented in the [Fabric developer's guide](#fabric-developer-guide) below. 
- -To contribute to this documentation, create an issue for any requests for -clarification or to highlight any errors, or you may clone and update the -[source](https://gerrit.hyperledger.org/r/#/admin/projects/fabric), and submit a -Gerrit review (essentially the same process as for fabric development). +documented in the [Fabric developer's guide](#fabric-developer's-guide) below. ## Maintainers @@ -45,42 +49,46 @@ Project's Technical Steering Committee (TSC). ## Communication We use [Hyperledger Slack](https://slack.hyperledger.org/) for communication and -Google Hangouts™ for screen sharing between developers. +Google Hangouts™ for screen sharing between developers. Our development +planning and prioritization is done in [JIRA](https://jira.hyperledger.org), +and we take longer running discussions/decisions to the +[mailing list](http://lists.hyperledger.org/mailman/listinfo/hyperledger-fabric). + +## Still Have Questions? +We try to maintain a comprehensive set of documentation (see below) for various audiences. +However, we realize that often there are questions that remain unanswered. For +any technical questions relating to the Hyperledger Fabric project not answered +in this documentation, please use +[StackOverflow](http://stackoverflow.com/questions/tagged/hyperledger). If you +need help finding things, please don't hesitate to send a note to the +[mailing list](http://lists.hyperledger.org/mailman/listinfo/hyperledger-fabric), +or ask on [Slack]((https://slack.hyperledger.org/)). # Hyperledger Fabric Documentation -The Hyperledger -[fabric](https://gerrit.hyperledger.org/r/#/admin/projects/fabric) is an -implementation of blockchain technology, that has been collaboratively developed -under the Linux Foundation's [Hyperledger Project](http://hyperledger.org). It -leverages familiar and proven technologies, and offers a modular architecture +The Hyperledger fabric is an implementation of blockchain technology, that has +been collaboratively developed under the Linux Foundation's +[Hyperledger Project](http://hyperledger.org). It leverages familiar and +proven technologies, and offers a modular architecture that allows pluggable implementations of various function including membership services, consensus, and smart contracts (Chaincode) execution. It features powerful container technology to host any mainstream language for smart contracts development. -## Still Have Questions? -We try to maintain a comprehensive set of documentation for various audiences. -However, we realize that often there are questions that remain unanswered. For -any technical questions relating to the Hyperledger Fabric project not answered -in this documentation, please use -[StackOverflow](http://stackoverflow.com/questions/tagged/hyperledger). - -## TOC +## Table of Contents Below, you'll find the following sections: -- [Getting started](#getting-started) -- [Quickstart](#quickstart-documentation) +- [Read All About It](#read-all-about-it) - [Developer guides](#developer-guides) - - [Fabric developer's guide](#fabric-developer-guide) - [Chaincode developer's guide](#chaincode-developer-guide) - - [API developer's guide](#api-developer-guide) + - [Application developer's guide](#application-developer-guide) + - [Fabric developer's guide](#fabric-developer-guide) - [Operations guide](#operations-guide) -# Getting started +## Read all about it If you are new to the project, you can begin by reviewing the following links. 
If you'd prefer to dive right in, see the @@ -96,47 +104,8 @@ where the community is developing use cases and requirements. the Fabric project's documentation. - [Fabric FAQs](https://github.com/hyperledger/fabric/tree/master/docs/FAQ) -# Quickstart documentation - -- [Development environment set-up](dev-setup/devenv.md): if you are considering -helping with development of the Hyperledger Fabric or Fabric-API projects -themselves, this guide will help you install and configure all you'll need. The -development environment is also useful (but, not necessary) for developing -blockchain applications and/or Chaincode. -- [Network setup](Setup/Network-setup.md): This document covers setting up a -network on your local machine for development. -- [Chaincode development environment](Setup/Chaincode-setup.md): Chaincode -developers need a way to test and debug their Chaincode without having to set up -a complete peer network. This document describes how to write, build, and test -Chaincode in a local development environment. -- [APIs](API/CoreAPI.md): This document covers the available APIs for -interacting with a peer node. - # Developer guides -## Fabric developer guide - -When you are ready to start contributing to the Hyperledger fabric project, we -strongly recommend that you read the [protocol specification](protocol-spec.md) -for the technical details so that you have a better understanding of how the -code fits together. - -- [Making code contributions](CONTRIBUTING.md): First, you'll want to familiarize -yourself with the project's contribution guidelines. -- [Setting up the development environment](dev-setup/devenv.md): after that, you -will want to set up your development environment. -- [Building the fabric core](dev-setup/build.md): next, try building the project -in your local development environment to ensure that everything is set up -correctly. -- [Building outside of Vagrant](dev-setup/build.md#building-outside-of-vagrant): -for the adventurous, you might try to build outside of the standard Vagrant -development environment. -- [Logging control](Setup/logging-control.md): describes how to tweak the logging -levels of various components within the fabric. -- [License header](dev-setup/headers.txt): every source file must include this -license header modified to include a copyright statement for the principle -author(s). - ## Chaincode developer guide - [Setting up the development environment](dev-setup/devenv.md): when developing @@ -152,22 +121,40 @@ testing Chaincode. - [Chaincode FAQ](FAQ/chaincode_FAQ.md): a FAQ for all of your burning questions relating to Chaincode. -## API developer guide - -- [APIs - CLI, REST, and Node.js](API/CoreAPI.md) - - [CLI](API/CoreAPI.md#cli): working with the command-line interface. - - [REST](API/CoreAPI.md#rest-api): working with the REST API. - - [Node.js SDK](https://github.com/hyperledger/fabric/blob/master/sdk/node/README.md): working with the Node.js SDK. - +## Application developer guide +``` + - [APIs - CLI, REST, and Node.js](API/CoreAPI.md) + - [CLI](API/CoreAPI.md#cli): working with the command-line interface. + - [REST](API/CoreAPI.md#rest-api): working with the REST API (*deprecated*). + - [Node.js SDK](nodeSDK/node-sdk-guide.md): working with the Node.js SDK. +``` +## Fabric developer guide +``` + - [Making code contributions](CONTRIBUTING.md): First, you'll want to familiarize + yourself with the project's contribution guidelines. 
+ - [Setting up the development environment](dev-setup/devenv.md): after that, you + will want to set up your development environment. + - [Building the fabric core](dev-setup/build.md): next, try building the project + in your local development environment to ensure that everything is set up + correctly. + - [Building outside of Vagrant](dev-setup/build.md#building-outside-of-vagrant): + for the *adventurous*, you might try to build outside of the standard Vagrant + development environment. + - [Logging control](Setup/logging-control.md): describes how to tweak the logging + levels of various components within the fabric. + - [License header](dev-setup/headers.txt): every source file must include this + license header modified to include a copyright statement for the principle + author(s). +``` # Operations guide - -- [Setting Up a Network](Setup/Network-setup.md): instructions for setting up a -network of fabric peers. -- [Certificate Authority (CA) Setup](Setup/ca-setup.md): setting up a CA to -support identity, security (authentication/authorization), privacy and -confidentiality. -- [Application ACL](tech/application-ACL.md): working with access control lists. - -## License +``` + - [Setting Up a Network](Setup/Network-setup.md): instructions for setting up a + network of fabric peers. + - [Certificate Authority (CA) Setup](Setup/ca-setup.md): setting up a CA to + support identity, security (authentication/authorization), privacy and + confidentiality. + - [Application ACL](tech/application-ACL.md): working with access control lists. +``` +# License The Hyperledger Project uses the [Apache License Version 2.0](LICENSE) software license. diff --git a/docs/nodeSDK/app-developer-env-setup.md b/docs/nodeSDK/app-developer-env-setup.md new file mode 100755 index 00000000000..004db12b8b3 --- /dev/null +++ b/docs/nodeSDK/app-developer-env-setup.md @@ -0,0 +1,50 @@ +# Setting up the Full Hyperledger fabric Developer's Environment + + * See [Setting Up The Development Environment](../dev-setup/devenv.md) to set up your development environment. + + * The following commands are all issued from the vagrant environment. The following will open a terminal session: + +``` + cd /fabric/devenv + vagrant up + vagrant ssh +``` + + * Issue the following commands to build the Hyperledger fabric client (HFC) Node.js SDK including the API reference documentation + +``` + cd /opt/gopath/src/github.com/hyperledger/fabric/sdk/node + make all +``` + * Issue the following command where your Node.js application is located if you wish to use the `require("hfc")`, this will install the HFC locally. 
+ +``` + npm install /opt/gopath/src/github.com/hyperledger/fabric/sdk/node +``` + + Or point to the HFC directly by using the following `require()` in your code: + +```javascript + require("/opt/gopath/src/github.com/hyperledger/fabric/sdk/node"); +``` + + * To build the API reference documentation: + +``` + cd /opt/gopath/src/github.com/hyperledger/fabric/sdk/node + make doc +``` + + * To build the reference documentation in the [Self Contained Node.js Environment](node-sdk-self-contained.md): + +``` + docker exec -it nodesdk /bin/bash + cd /opt/gopath/src/github.com/hyperledger/fabric/sdk/node + make doc +``` + + * The the API reference documentation will be available in: + ``` + /opt/gopath/src/github.com/hyperledger/fabric/sdk/node/doc + ``` + diff --git a/docs/nodeSDK/app-overview.md b/docs/nodeSDK/app-overview.md new file mode 100755 index 00000000000..fc9d8fd7919 --- /dev/null +++ b/docs/nodeSDK/app-overview.md @@ -0,0 +1,56 @@ +# Application Overview + +Hyperledger fabric supports two types of applications: + + * A standalone application that interacts directly with a blockchain. + See the [Standalone Application](#standaloneApp) section. + + * A web application that interacts with a blockchain on behalf of its web application users. + See the [Web Application](#webApp) section. + + +## Standalone Application + +The following diagram provides an overview of the major components of Hyperledger fabric for the standalone application developer. + + + +In the diagram above, the blue boxes are Hyperledger fabric components and the green boxes are application developer components. Each outer box represents a separate process. + +The **Standalone Application** may be developed in Node.js by using the Hyperledger fabric Client (HFC) SDK for Node.js. This SDK handles all interactions with other Hyperledger fabric components. + + +#### SDK interactions + +The **SDK** interacts with a **Peer** process. If the Peer process fails, the Node.js Client SDK can fail-over to another Peer as shown by the dotted line from the Node.js Client SDK to another Peer. + +This interaction with the Peer consists of submitting transactions to the blockchain. There are three types of transactions: + + * deploy - to deploy developer's chaincode as depicted by the green **CC1** boxes in the diagram; + * invoke - to execute a chaincode function which changes the state of the *blockchain database*; + * query - to execute a chaincode function which may return state information related to the *blockchain database*. + +The **SDK** also interacts with a **Membership Services** process. In fact, if security is enabled in the Peer (strongly recommended), the Node.js client SDK must interact with Membership Services first in order to retrieve credential certificates which are then used to interact with the Peer. + +The interaction with the Membership Services consists of: + + * register - to invite a new user to the blockchain + * enroll - to get access to the blockchain + + +## Web Application + +The following diagram provides an overview of the major components of Hyperledger fabric for the web application developer. + +At a high-level, you can think of the blockchain as a database with which the web application interacts; therefore, it is similar to the following topology. + +``` +browser --> web tier --> database tier + +``` + + + +In the diagram above, the blue boxes are Hyperledger fabric components and the green boxes are application developer components. Each outer box represents a separate process. 
+ +The browser interacts with the developer's Node.js web application using the Hyperledger fabric's Node.js client SDK. The SDK handles all interactions with other Hyperledger fabric components as described in the [SDK interactions](#sdk-interactions) section of [Standalone Application](#standaloneApp). diff --git a/docs/nodeSDK/images/standalone-app-developer.png b/docs/nodeSDK/images/standalone-app-developer.png new file mode 100644 index 00000000000..1fb8ac82268 Binary files /dev/null and b/docs/nodeSDK/images/standalone-app-developer.png differ diff --git a/docs/nodeSDK/images/web-app-developer.png b/docs/nodeSDK/images/web-app-developer.png new file mode 100644 index 00000000000..1fb8ac82268 Binary files /dev/null and b/docs/nodeSDK/images/web-app-developer.png differ diff --git a/docs/nodeSDK/node-sdk-guide.md b/docs/nodeSDK/node-sdk-guide.md new file mode 100755 index 00000000000..e47079757e6 --- /dev/null +++ b/docs/nodeSDK/node-sdk-guide.md @@ -0,0 +1,32 @@ +# Hyperledger fabric Client (HFC) SDK for Node.js + +The Hyperledger fabric Client (HFC) SDK for Node.js provides a powerful and easy to use API to interact with a Hyperledger fabric blockchain. The HFC is designed to be used in the Node.js JavaScript runtime. + +#### Overview and Deep Dive + +* [Application Developer's Overview](app-overview.md) for a topological overview of applications and a blockchain. + +* [Hyperledger fabric Client (HFC) SDK for Node.js](node-sdk-indepth.md) the Node.js client SDK in more depth + +#### Development Environment Choices + +* *Recommended:* [Self Contained Node.js Environment](node-sdk-self-contained.md) uses pre-built docker images for the Node.js client application interacting with Hyperledger fabric blockchain. This environment may suffice for a majority of Node.js application developers. The environment contains a built-in standalone sample ready to go. + +* [Full Hyperledger fabric development environment](app-developer-env-setup.md) on how to set up an environment for developing chaincode and applications. + + **Note:** Only recommended for development of the Hyperledger fabric Client SDK itself. + + +#### Sample Code + +* [Node.js Standalone Application in Vagrant](sample-standalone-app.md) for a sample standalone Node.js application running in the full development environment within Vagrant. + +* [Node.js Web Application](sample-web-app.md) for a sample web application and to see how to use the Node.js client SDK for a sample web app leveraging the client SDK to interact with a blockchain network. + + + +#### Related information + + * To build the reference documentation for the Node.js client SDK APIs follow the instructions [here](app-developer-env-setup.md). + + * To learn more about chaincode, see [Writing, Building, and Running Chaincode in a Development Environment](../Setup/Chaincode-setup.md). \ No newline at end of file diff --git a/docs/nodeSDK/node-sdk-indepth.md b/docs/nodeSDK/node-sdk-indepth.md new file mode 100755 index 00000000000..58eec77378b --- /dev/null +++ b/docs/nodeSDK/node-sdk-indepth.md @@ -0,0 +1,169 @@ +# Hyperledger fabric Client (HFC) SDK for Node.js + +The Hyperledger fabric Client (HFC) SDK provides a powerful and easy to use API to interact with a Hyperledger fabric blockchain. 
+ +Below, you'll find the following sections: + +- [Installing only the SDK](#installing-only-the-sdk) +- [Terminology](#terminology) +- [HFC Objects](#hfc-objects) +- [Pluggability](#pluggability) +- [Chaincode Deployment](#chaincode-deployment) +- [Enabling TLS](#enabling-tls) +- [Troubleshooting](#troubleshooting) + +## Installing only the SDK + +If you are an experienced node.js developer and already have a blockchain environment set up and running elsewhere, you can set up a client-only environment to run the node.js client by installing the HFC node module as shown below. This assumes a minimum of npm version 2.11.3 and node.js version 0.12.7 are already installed. + +* To install the latest HFC module of Hyperledger fabric + +``` + npm install hfc +``` + +### Terminology + +In order to transact on a Hyperledger fabric blockchain, you must first have an identity which has been both **registered** and **enrolled** with Membership Services. For a topological overview of how the components interact, see [Application Developer's Overview](app-overview.md). + +Think of **registration** as *issuing a user invitation* to join a blockchain. It consists of adding a new user name (also called an *enrollment ID*) to the membership service configuration. This can be done programatically with the `Member.register` method, or by adding the enrollment ID directly to the [membersrvc.yaml](https://github.com/hyperledger/fabric/blob/master/membersrvc/membersrvc.yaml) configuration file. + +Think of **enrollment** as *accepting a user invitation* to join a blockchain. This is always done by the entity that will transact on the blockchain. This can be done programatically via the `Member.enroll` method. + +## HFC Objects + +HFC is written primarily in typescript. The source can be found in the `fabric/sdk/node/src` directory. The reference documentation is generated automatically from this source code and can be found in`fabric/sdk/node/doc` after building the project. + +The following is a high-level description of the HFC objects (classes and interfaces) to help guide you through the object hierarchy. + +* **Chain** + + This is the main top-level class which is the client's representation of a chain. HFC allows you to interact with multiple chains and to share a single `KeyValStore` and `MemberServices` object with multiple chains if needed. For each chain, you add one or more `Peer` objects which represents the endpoint(s) to which HFC connects to transact on the chain. The second peer is used only if the first peer fails. The third peer is used only if both the first and second peers fail, etc. + +* **KeyValStore** + + This is a very simple interface which HFC uses to store and retrieve all persistent data. This data includes private keys, so it is very important to keep this storage secure. The default implementation is a simple file-based version found in the `FileKeyValStore` class. If running in a clustered web application, you will need to either ensure that a shared file system is used, or you must implement your own `KeyValStore` that can be shared among all cluster members. + +* **MemberServices** + + This is an interface representing Membership Services and is implemented by the `MemberServicesImpl` class. It provides security and identity related features such as privacy, unlinkability, and confidentiality. This implementation issues *ECerts* (enrollment certificates) and *TCerts* (transaction certificates). ECerts are for enrollment identity and TCerts are for transactions. 
+ +* **Member** or **User** + + The Member class most often represents an end User who transacts on the chain, but it may also represent other types of members such as peers. From the Member class, you can *register* and *enroll* members or users. This interacts with the `MemberServices` object. You can also deploy, query, and invoke chaincode directly, which interacts with the `Peer`. The implementation for deploy, query and invoke simply creates a temporary `TransactionContext` object and delegates the work to it. + +* **TransactionContext** + + This class implements the bulk of the deploy, invoke, and query logic. It interacts with Membership Services to get a TCert to perform these operations. Note that there is a one-to-one relationship between TCert and TransactionContext. In other words, a single TransactionContext will always use the same TCert. If you want to issue multiple transactions with the same TCert, then you can get a `TransactionContext` object from a `Member` object directly and issue multiple deploy, invoke, or query operations on it. Note however that if you do this, these transactions are linkable, which means someone could tell that they came from the same user, though not know which user. For this reason, you will typically just call deploy, invoke, and query on the Member or User object. + +## Pluggability + +HFC was designed to support two pluggable components: + +1. Pluggable `KeyValStore` key value store which is used to retrieve and store keys associated with a member. The key value store is used to store sensitive private keys, so care must be taken to properly protect access. + + **IMPORTANT NOTE**: The default KeyValStore is file-based. If multiple instances of a web application run in a cluster, you must provide an implementation of the KeyValStore which is used by all members of the cluster. + +2. Pluggable `MemberServices` which is used to register and enroll members. Member services enables hyperledger to be a permissioned blockchain, providing security services such as anonymity, unlinkability of transactions, and confidentiality + +## Chaincode Deployment + +### 'net' mode + +To have the chaincode deployment succeed in network mode, you must properly set +up the chaincode project outside of your Hyperledger fabric source tree to include all the **golang** dependencies such that when tarred up and sent to the peer, the peer will be able to build the chain code and then deploy it. The +following instructions will demonstrate how to properly set up the directory +structure to deploy *chaincode_example02* in network mode. + +The chaincode project must be placed under the `$GOPATH/src` directory. For example, the [chaincode_example02](https://github.com/hyperledger/fabric/blob/master/examples/chaincode/go/chaincode_example02/chaincode_example02.go) +project should be placed under `$GOPATH/src/` as shown below. + +``` + mkdir -p $GOPATH/src/github.com/chaincode_example02/ + cd $GOPATH/src/github.com/chaincode_example02 + curl GET https://raw.githubusercontent.com/hyperledger/fabric/master/examples/chaincode/go/chaincode_example02/chaincode_example02.go > chaincode_example02.go +``` + +After you have placed your chaincode project under the `$GOPATH/src`, you will need to vendor the dependencies. 
From the directory containing your chaincode source, run the following commands: + +``` + go get -u github.com/kardianos/govendor + cd $GOPATH/src/github.com/chaincode_example02 + govendor init + govendor fetch github.com/hyperledger/fabric +``` + +Now, execute `go build` to verify that all of the chaincode dependencies are +present. + +``` + go build +``` + +### 'dev' mode +For deploying chaincode in development mode see [Writing, Building, and Running Chaincode in a Development Environment](../Setup/Chaincode-setup.md). +The chaincode must be running and connected to the peer before issuing the `deploy()` from the Node.js application. The hfc `chain` object must be set to dev mode. + +```javascript +chain.setDevMode(true); +``` +The deploy request must include the `chaincodeName` that the chaincode registered with the peer. The built-in chaincode example checks an environment variable `CORE_CHAINCODE_ID_NAME=mycc` when it starts. + +```json + var deployRequest = { + chaincodeName: 'mycc', + fcn: "init", + args: ["a", "100", "b", "200"] + }; +``` + +### Enabling TLS + +If you wish to configure TLS with the Membership Services server, the following steps are required: + +- Modify `$GOPATH/src/github.com/hyperledger/fabric/membersrvc/membersrvc.yaml` as follows: + +``` + server: + tls: + cert: + file: "/var/hyperledger/production/.membersrvc/tlsca.cert" + key: + file: "/var/hyperledger/production/.membersrvc/tlsca.priv" +``` + +To specify to the Membership Services (TLS) Certificate Authority (TLSCA) what X.509 v3 Certificate (with a corresponding Private Key) to use: + +- Modify `$GOPATH/src/github.com/hyperledger/fabric/peer/core.yaml` as follows: + +``` + peer: + pki: + tls: + enabled: true + rootcert: + file: "/var/hyperledger/production/.membersrvc/tlsca.cert" +``` + +To configure the peer to connect to the Membership Services server over TLS (otherwise, the connection will fail). + +- Bootstrap your Membership Services and the peer. This is needed in order to have the file *tlsca.cert* generated by the member services. + +- Copy `/var/hyperledger/production/.membersrvc/tlsca.cert` to `$GOPATH/src/github.com/hyperledger/fabric/sdk/node`. + +*Note:* If you cleanup the folder `/var/hyperledger/production` then don't forget to copy again the *tlsca.cert* file as described above. + +### Troubleshooting +If you see errors stating that the client has already been registered/enrolled, keep in mind that you can perform the enrollment process only once, as the enrollmentSecret is a one-time-use password. You will see these errors if you have performed a user registration/enrollment and subsequently deleted the cryptographic tokens stored on the client side. The next time you try to enroll, errors similar to the ones below will be seen. + +``` + Error: identity or token do not match + +``` + +``` + Error: user is already registered + +``` + +To address this, remove any stored cryptographic material from the CA server by following the instructions [here](https://github.com/hyperledger/fabric/blob/master/docs/Setup/Chaincode-setup.md#removing-temporary-files-when-security-is-enabled). You will also need to remove any of the cryptographic tokens stored on the client side by deleting the KeyValStore directory. That directory is configurable and is set to `/tmp/keyValStore` within the unit tests. 
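To tie the 'dev' mode deployment pieces above together, here is a minimal sketch of a Node.js program that enrolls a pre-registered user and deploys the already-running chaincode by name. It is illustrative rather than a shipped sample: it assumes a peer started with `--peer-chaincodedev`, the example chaincode running with `CORE_CHAINCODE_ID_NAME=mycc`, and the default ports and `WebAppAdmin` credentials used elsewhere in these documents.

```javascript
// Minimal dev-mode deployment sketch (illustrative, not a shipped sample).
var hfc = require('hfc');

// Point a chain object at the local membership services and peer.
var chain = hfc.newChain("devChain");
chain.setKeyValStore(hfc.newFileKeyValStore('/tmp/keyValStore'));
chain.setMemberServicesUrl("grpc://localhost:7054");
chain.addPeer("grpc://localhost:7051");

// The chaincode is already built, running, and registered as "mycc".
chain.setDevMode(true);

// Enroll a user that is pre-registered in membersrvc.yaml, then deploy by name.
chain.enroll("WebAppAdmin", "DJY27pEnl16d", function(err, user) {
    if (err) return console.log("ERROR: enroll failed: %s", err);

    var deployRequest = {
        chaincodeName: 'mycc',        // must match CORE_CHAINCODE_ID_NAME
        fcn: "init",
        args: ["a", "100", "b", "200"]
    };

    var tx = user.deploy(deployRequest);
    tx.on('complete', function(results) {
        console.log("deploy completed: %j", results);
    });
    tx.on('error', function(err) {
        console.log("deploy error: %j", err);
    });
});
```

Because dev mode identifies the chaincode by name rather than by source path, the `$GOPATH` vendoring steps described for 'net' mode are not needed here.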
diff --git a/docs/nodeSDK/sample-standalone-app.md b/docs/nodeSDK/sample-standalone-app.md new file mode 100755 index 00000000000..8ee2c6249f8 --- /dev/null +++ b/docs/nodeSDK/sample-standalone-app.md @@ -0,0 +1,49 @@ +# Node.js Standalone Application in Vagrant + +This section describes how to run a sample standalone Node.js application which interacts with a Hyperledger fabric blockchain. + +* If you haven't already done so, see [Setting Up The Application Development Environment](app-developer-env-setup.md) to get your environment set up. The remaining steps assume that you are running **inside the vagrant environment**. + +* Issue the following commands to build the Node.js Client SDK: + +``` + cd /opt/gopath/src/github.com/hyperledger/fabric + make node-sdk +``` + +* Start the membership services and peer processes. We run the peer in dev mode for simplicity. + +``` + cd /opt/gopath/src/github.com/hyperledger/fabric/build/bin + membersrvc > membersrvc.log 2>&1& + peer node start --peer-chaincodedev > peer.log 2>&1& +``` + +* Build and run chaincode example 2: + +``` + cd /opt/gopath/src/github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 + go build + CORE_CHAINCODE_ID_NAME=mycc CORE_PEER_ADDRESS=0.0.0.0:7051 ./chaincode_example02 > log 2>&1& +``` + +* Put the following sample app in a file named **app.js** in the */tmp* directory. Take a moment (now or later) to read the comments and code to begin to learn the Node.js Client SDK APIs. + + You may retrieve the [sample application](https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/app.js) file: +``` + cd /tmp + curl -o app.js https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/app.js +``` +* Run **npm** to install Hyperledger Fabric Node.js SDK in the `/tmp` directory +``` + npm install /opt/gopath/src/github.com/hyperledger/fabric/sdk/node +``` + +* To run the application : + +``` + CORE_CHAINCODE_ID_NAME=mycc CORE_PEER_ADDRESS=0.0.0.0:7051 MEMBERSRVC_ADDRESS=0.0.0.0:7054 DEPLOY_MODE=dev node app +``` + +Congratulations! You've successfully run your first Hyperledger fabric application. + diff --git a/docs/nodeSDK/sample-web-app.md b/docs/nodeSDK/sample-web-app.md new file mode 100755 index 00000000000..911942ce083 --- /dev/null +++ b/docs/nodeSDK/sample-web-app.md @@ -0,0 +1,13 @@ +# Node.js Web Application + +The following is a web application template. It is NOT a complete program. + +This template code demonstrates how to communicate with a blockchain from a Node.js web application. It does not show how to listen for incoming requests nor how to authenticate your web users as this is application specific. The code below is intended to demonstrate how to interact with a Hyperledger fabric blockchain from an existing web application. 
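Before retrieving the full template, the following is a minimal sketch, not the official `web-app.js`, of the pattern such a web application typically follows: configure the chain once, enroll a registrar, and then, per authenticated request, register/enroll the web user and invoke chaincode on that user's behalf. The handler name, account, and affiliation values are illustrative only and mirror the HFC usage shown elsewhere in these documents.

```javascript
// Illustrative web-tier sketch (not the official web-app.js template).
var hfc = require('hfc');

// One-time initialization: configure the chain and enroll the registrar.
var chain = hfc.newChain("webAppChain");
chain.setKeyValStore(hfc.newFileKeyValStore('/tmp/keyValStore'));
chain.setMemberServicesUrl("grpc://localhost:7054");
chain.addPeer("grpc://localhost:7051");

chain.enroll("WebAppAdmin", "DJY27pEnl16d", function(err, webAppAdmin) {
    if (err) return console.log("ERROR: failed to enroll registrar: %s", err);
    // The registrar is authorized to register the web application's end users.
    // Request handling should begin only after this point.
    chain.setRegistrar(webAppAdmin);
});

// Per-request work: register/enroll the authenticated web user, then invoke
// chaincode on that user's behalf.
function handleUserRequest(userName, chaincodeID, fcn, args) {
    var registrationRequest = {
        enrollmentID: userName,
        account: "bank_a",            // illustrative attributes
        affiliation: "00001"
    };
    // Succeeds even for a previously registered/enrolled user, because the
    // state is kept in the KeyValStore.
    chain.registerAndEnroll(registrationRequest, function(err, user) {
        if (err) return console.log("ERROR: %s", err);
        var invokeRequest = { chaincodeID: chaincodeID, fcn: fcn, args: args };
        var tx = user.invoke(invokeRequest);
        tx.on('complete', function(results) {
            console.log("completed invoke: %j", results);
        });
        tx.on('error', function(err) {
            console.log("error on invoke: %j", err);
        });
    });
}
```

How the web tier receives requests and authenticates its users is application specific, which is why the sketch stops at the handler.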
+ +You may retrieve the [sample application]( https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/web-app.js) file: + +``` + curl -o app.js https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/web-app.js +``` + + diff --git a/docs/protocol-spec.md b/docs/protocol-spec.md index fcd204d2822..e80b4d1894e 100644 --- a/docs/protocol-spec.md +++ b/docs/protocol-spec.md @@ -2520,7 +2520,7 @@ POST host:port/chaincode "params": { "type": "GOLANG", "chaincodeID":{ - "path":"github.com/hyperledger/fabic/examples/chaincode/go/chaincode_example02" + "path":"github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" }, "ctorMsg": { "function":"init", @@ -2555,7 +2555,7 @@ POST host:port/chaincode "params": { "type": "GOLANG", "chaincodeID":{ - "path":"github.com/hyperledger/fabic/examples/chaincode/go/chaincode_example02" + "path":"github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" }, "ctorMsg": { "function":"init", diff --git a/docs/protocol-spec_zh.md b/docs/protocol-spec_zh.md index 9e26824780c..c8be3d94c57 100755 --- a/docs/protocol-spec_zh.md +++ b/docs/protocol-spec_zh.md @@ -2219,7 +2219,7 @@ POST host:port/chaincode "params": { "type": "GOLANG", "chaincodeID":{ - "path":"github.com/hyperledger/fabic/examples/chaincode/go/chaincode_example02" + "path":"github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" }, "ctorMsg": { "function":"init", @@ -2254,7 +2254,7 @@ POST host:port/chaincode "params": { "type": "GOLANG", "chaincodeID":{ - "path":"github.com/hyperledger/fabic/examples/chaincode/go/chaincode_example02" + "path":"github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" }, "ctorMsg": { "function":"init", @@ -2603,7 +2603,7 @@ To see what CLI commands are currently available in the implementation, execute 要查看当前可用的CLI命令,执行如下命令 - cd $GOPATH/src/github.com/hyperledger/fabic/peer + cd $GOPATH/src/github.com/hyperledger/fabric/peer ./peer 你可以获得和下面类似的响应: diff --git a/docs/releases.md b/docs/releases.md new file mode 100644 index 00000000000..51ba90eadfc --- /dev/null +++ b/docs/releases.md @@ -0,0 +1,62 @@ + + +[v0.6-preview](https://github.com/hyperledger/fabric/tree/v0.6) September 16, 2016 + +A developer preview release of the Hyperledger Fabric intended +to exercise the release logistics and stabilize a set of capabilities for +developers to try out. This will be the last release under the original +architecture. All subsequent releases will deliver on the +[v1.0 architecture](TODO). 
+ +Key enhancements: + +* 8de58ed - NodeSDK doc changes -- FAB-146 +* 62d866d - Add flow control to SYNC_STATE_SNAPSHOT +* 4d97069 - Adding TLS changes to SDK +* e9d3ac2 - Node-SDK: add support for fabric events(block, chaincode, transactional) +* 7ed9533 - Allow deploying Java chaincode from remote git repositories +* 4bf9b93 - Move Docker-Compose files into their own folder +* ce9fcdc - Print ChaincodeName when deploy with CLI +* 4fa1360 - Upgrade go protobuf from 3-beta to 3 +* 4b13232 - Table implementation in java shim with example +* df741bc - Add support for dynamically registering a user with attributes +* 4203ea8 - Check for duplicates when adding peers to the chain +* 518f3c9 - Update docker openjdk image +* 47053cd - Add GetTxID function to Stub interface (FAB-306) +* ac182fa - Remove deprecated devops REST API +* ad4645d - Support hyperledger fabric build on ppc64le platform +* 21a4a8a - SDK now properly adding a peer with an invalid URL +* 1d8114f - Fix setting of watermark on restore from crash +* a98c59a - Upgrade go protobuff from 3-beta to 3 +* 937039c - DEVENV: Provide strong feedback when provisioning fails +* d74b1c5 - Make pbft broadcast timeout configurable +* 97ed71f - Java shim/chaincode project reorg, separate java docker env +* a76dd3d - Start container with HostConfig was deprecated since v1.10 and removed since v1.12 +* 8b63a26 - Add ability to unregister for events +* 3f5b2fa - Add automatic peer command detection +* 6daedfd - Re-enable sending of chaincode events +* b39c93a - Update Cobra and pflag vendor libraries +* dad7a9d - Reassign port numbers to 7050-7060 range + +[v0.5-developer-preview](https://github.com/hyperledger-archives/fabric/tree/v0.5-developer-preview) +June 17, 2016 + +A developer preview release of the Hyperledger Fabric intended +to exercise the release logistics and stabilize a set of capabilities for +developers to try out. + +Key features: + +Permissioned blockchain with immediate finality +Chaincode (aka smart contract) execution environments +Docker container (user chaincode) +In-process with peer (system chaincode) +Pluggable consensus with PBFT, NOOPS (development mode), SIEVE (prototype) +Event framework supports pre-defined and custom events +Client SDK (Node.js), basic REST APIs and CLIs +Known Key Bugs and work in progress + +* 1895 - Client SDK interfaces may crash if wrong parameter specified +* 1901 - Slow response after a few hours of stress testing +* 1911 - Missing peer event listener on the client SDK +* 889 - The attributes in the TCert are not encrypted. This work is still on-going diff --git a/docs/starter/fabric-starter-kit.md b/docs/starter/fabric-starter-kit.md new file mode 100755 index 00000000000..7d199ad1281 --- /dev/null +++ b/docs/starter/fabric-starter-kit.md @@ -0,0 +1,170 @@ +# Fabric Starter Kit + +This section describes how to set up a self-contained environment for +application development with the Hyperledger fabric. The setup +uses **Docker** to provide a controlled environment with all the necessary +Hyperledger fabric components to support a Node.js application built with +the fabric's Node.js SDK, and chaincode written in Go. + +There are three Docker images that, when run, will provide a basic +network environment. There is an image to run a single `peer`, one to run +the `membersrvc`, and one to run both your Node.js application and your +chaincode. See [Application Developer's Overview](../nodeSDK/app-overview.md) on how the +components running within the containers will communicate. 
+ +The starter kit comes with a sample Node.js application ready to execute and +sample chaincode. The starter kit will be running in chaincode developer mode. +In this mode, the chaincode is built and started prior to the application +making a call to deploy it. + +**Note:** The deployment of chaincode in network mode requires that the +Hyperledger fabric Node.js SDK has access to the chaincode source code and all +of its dependencies, in order to properly build a deploy request. It also +requires that the `peer` have access to the Docker daemon to be able to build +and deploy the new Docker image that will run the chaincode. *This is a more +complicated configuration and not suitable to an introduction to the +Hyperledger fabric.* We recommend first running in chaincode development mode. + +## Further exploration + +If you wish, there are a number of chaincode examples near by. +``` + cd ../../chaincode +``` +## Getting started + +**Note:** This sample was prepared using Docker for Mac 1.12.0 + +* Prerequisite software to install: + + * [Docker](https://www.docker.com/products/overview) + * docker-compose (may be packaged with Docker) + +* Copy our [docker-compose.yml](https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/docker-compose.yml) file to a local directory: + +``` + curl -o docker-compose.yml https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/docker-compose.yml +``` + The docker-compose environment uses three Docker images. Two are published to + DockerHub. However, with the third, we provide you the source to build your own, + so that you can customize it to inject your application code for development. The following [Dockerfile](https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/Dockerfile) + is used to build the base **fabric-starter-kit** image and may be used as + a starting point for your own customizations. + +``` + curl -o Dockerfile https://raw.githubusercontent.com/hyperledger/fabric/master/examples/sdk/node/Dockerfile + docker build -t hyperledger/fabric-starter-kit:latest . +``` + +* Start the fabric network environment using docker-compose. From a terminal +session that has the working directory of where the above *docker-compose.yml* +is located, execute one of following `docker-compose` commands. + + * to run as detached containers: + +``` + docker-compose up -d +``` + **note:** to see the logs for the `peer` container use the + `docker logs peer` command + + * to run in the foreground and see the log output in the current terminal + session: + +``` + docker-compose up +``` + + Both commands will start three Docker containers. To view the container + status use the `docker ps` command. The first time this is run, the Docker + images will be downloaded. This may take 10 minutes or more depending on the + network connections of the system running the command. + +``` + docker ps +``` + You should see something similar to the following: + +``` + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + bb01a2fa96ef hyperledger/fabric-starter-kit "sh -c 'sleep 20; /op" About a minute ago Up 59 seconds starter + ec7572e65f12 hyperledger/fabric-peer "sh -c 'sleep 10; pee" About a minute ago Up About a minute peer + 118ef6da1709 hyperledger/fabric-membersrvc "membersrvc" About a minute ago Up About a minute membersrvc +``` + +* Start a terminal session in the **starter** container. This is where the +Node.js application is located. 
+
+  **note:** Be sure to wait 20 seconds after starting the network using the
+  `docker-compose up` command before executing the following command to allow
+  the network to initialize:
+
+```
+ docker exec -it starter /bin/bash
+```
+
+* From the terminal session in the **starter** container, execute the standalone
+Node.js application. The terminal session should already be in the working
+directory of the sample application, **app.js** (*/opt/gopath/src/github.com/hyperledger/fabric/examples/sdk/node*). Execute
+the following Node.js command to run the application:
+
+```
+ node app
+```
+ In another terminal session on the host you can view the logs for the peer
+ by executing the following command (not in the docker shell above, but in a
+ new terminal session on the host system):
+
+```
+ docker logs peer
+```
+
+* If you wish to run your own Node.js application using the pre-built Docker
+images:
+  * use the directories in the `volumes` tag under **starter** in the
+  `docker-compose.yml` file to share your application code between the host
+  system and the Docker container. The first path is on the host system and
+  the second is created in the Docker container. If you wish to use a host
+  location that is not under the `/Users` directory (`~` is under `/Users`),
+  then you must add that location to the Docker file sharing list under
+  Docker preferences.
+
+```yaml
+  volumes:
+    - ~/mytest:/user/mytest
+```
+  * copy or create and edit your application in the `~/mytest` directory as
+  stated in the `docker-compose.yml` `volumes` tag under the **starter** container.
+  * run npm to install the Hyperledger fabric Node.js SDK in the `mytest` directory:
+
+```
+ npm install /opt/gopath/src/github.com/hyperledger/fabric/sdk/node
+```
+  * run the application from within the **starter** Docker container using the
+  following commands:
+
+```
+ docker exec -it starter /bin/bash
+```
+ once in the shell, and assuming your Node.js application is called `app.js`:
+
+```
+ cd /user/mytest
+ node app
+```
+* To shut down the environment, execute the following **docker-compose** command
+in the directory where the *docker-compose.yml* is located. Any changes you made
+to the sample application or to the deployment of a chaincode will be lost. Only
+changes made to the shared area defined in the `volumes` tag of the **starter**
+container will persist.
This will shutdown each of the containers and remove +the containers from Docker: + +``` + docker-compose down +``` + or if you wish to keep your changes and just stop the containers, which will + be restarted on the next `up` command: + +``` + docker-compose kill +``` diff --git a/events/producer/handler.go b/events/producer/handler.go index 57befb82308..4ffe8e2022d 100644 --- a/events/producer/handler.go +++ b/events/producer/handler.go @@ -25,7 +25,6 @@ import ( type handler struct { ChatStream pb.Events_ChatServer - doneChan chan bool interestedEvents map[string]*pb.Interest } @@ -34,7 +33,6 @@ func newEventHandler(stream pb.Events_ChatServer) (*handler, error) { ChatStream: stream, } d.interestedEvents = make(map[string]*pb.Interest) - d.doneChan = make(chan bool) return d, nil } @@ -42,7 +40,6 @@ func newEventHandler(stream pb.Events_ChatServer) (*handler, error) { func (d *handler) Stop() error { d.deregisterAll() d.interestedEvents = nil - d.doneChan <- true return nil } diff --git a/events/producer/producer.go b/events/producer/producer.go index 4e70d0fd56a..85508b936fe 100644 --- a/events/producer/producer.go +++ b/events/producer/producer.go @@ -68,7 +68,7 @@ func (p *EventsServer) Chat(stream pb.Events_ChatServer) error { err = handler.HandleMessage(in) if err != nil { producerLogger.Errorf("Error handling message: %s", err) - //return err + return err } } diff --git a/examples/chaincode/go/asset_management/app/app_internal.go b/examples/chaincode/go/asset_management/app/app_internal.go index 631ba346a42..f9adb444ba8 100644 --- a/examples/chaincode/go/asset_management/app/app_internal.go +++ b/examples/chaincode/go/asset_management/app/app_internal.go @@ -143,7 +143,7 @@ func deployInternal(deployer crypto.Client, adminCert crypto.CertificateHandler) Type: 1, ChaincodeID: &pb.ChaincodeID{Path: "github.com/hyperledger/fabric/examples/chaincode/go/asset_management"}, //ChaincodeID: &pb.ChaincodeID{Name: chaincodeName}, - CtorMsg: &pb.ChaincodeInput{Function: "init", Args: []string{}}, + CtorMsg: &pb.ChaincodeInput{Args: util.ToChaincodeArgs("init")}, Metadata: adminCert.GetCertificate(), ConfidentialityLevel: confidentialityLevel, } @@ -187,8 +187,7 @@ func assignOwnershipInternal(invoker crypto.Client, invokerCert crypto.Certifica } chaincodeInput := &pb.ChaincodeInput{ - Function: "assign", - Args: []string{asset, base64.StdEncoding.EncodeToString(newOwnerCert.GetCertificate())}, + Args: util.ToChaincodeArgs("assign", asset, base64.StdEncoding.EncodeToString(newOwnerCert.GetCertificate())), } chaincodeInputRaw, err := proto.Marshal(chaincodeInput) if err != nil { @@ -239,8 +238,7 @@ func transferOwnershipInternal(owner crypto.Client, ownerCert crypto.Certificate } chaincodeInput := &pb.ChaincodeInput{ - Function: "transfer", - Args: []string{asset, base64.StdEncoding.EncodeToString(newOwnerCert.GetCertificate())}, + Args: util.ToChaincodeArgs("transfer", asset, base64.StdEncoding.EncodeToString(newOwnerCert.GetCertificate())), } chaincodeInputRaw, err := proto.Marshal(chaincodeInput) if err != nil { @@ -275,7 +273,7 @@ func transferOwnershipInternal(owner crypto.Client, ownerCert crypto.Certificate } func whoIsTheOwner(invoker crypto.Client, asset string) (transaction *pb.Transaction, resp *pb.Response, err error) { - chaincodeInput := &pb.ChaincodeInput{Function: "query", Args: []string{asset}} + chaincodeInput := &pb.ChaincodeInput{Args: util.ToChaincodeArgs("query", asset)} // Prepare spec and submit spec := &pb.ChaincodeSpec{ diff --git 
a/examples/chaincode/go/asset_management/asset_management.go b/examples/chaincode/go/asset_management/asset_management.go index 2ec8d15bae1..1734c2c672d 100644 --- a/examples/chaincode/go/asset_management/asset_management.go +++ b/examples/chaincode/go/asset_management/asset_management.go @@ -266,7 +266,7 @@ func (t *AssetManagementChaincode) Query(stub shim.ChaincodeStubInterface) ([]by myLogger.Debugf("Query [%s]", function) if function != "query" { - return nil, errors.New("Invalid query function name. Expecting \"query\"") + return nil, errors.New("Invalid query function name. Expecting 'query' but found '" + function + "'") } var err error diff --git a/examples/chaincode/go/asset_management02/asset_management02_test.go b/examples/chaincode/go/asset_management02/asset_management02_test.go index 9f2d13831d3..14efcb800da 100755 --- a/examples/chaincode/go/asset_management02/asset_management02_test.go +++ b/examples/chaincode/go/asset_management02/asset_management02_test.go @@ -24,7 +24,6 @@ import ( "testing" "time" - "io/ioutil" "os" "path/filepath" @@ -34,6 +33,7 @@ import ( "github.com/hyperledger/fabric/core/container" "github.com/hyperledger/fabric/core/crypto" "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/core/util" "github.com/hyperledger/fabric/membersrvc/ca" pb "github.com/hyperledger/fabric/protos" "github.com/op/go-logging" @@ -330,7 +330,7 @@ func deploy(admCert crypto.CertificateHandler) error { spec := &pb.ChaincodeSpec{ Type: 1, ChaincodeID: &pb.ChaincodeID{Name: "mycc"}, - CtorMsg: &pb.ChaincodeInput{Function: "init", Args: []string{}}, + CtorMsg: &pb.ChaincodeInput{Args: util.ToChaincodeArgs("init")}, Metadata: []byte("issuer"), ConfidentialityLevel: pb.ConfidentialityLevel_PUBLIC, } @@ -373,7 +373,7 @@ func assignOwnership(assigner crypto.Client, newOwnerCert crypto.CertificateHand return err } - chaincodeInput := &pb.ChaincodeInput{Function: "assignOwnership", Args: []string{base64.StdEncoding.EncodeToString(newOwnerCert.GetCertificate()), attributeName, amount}} + chaincodeInput := &pb.ChaincodeInput{Args: util.ToChaincodeArgs("assignOwnership", base64.StdEncoding.EncodeToString(newOwnerCert.GetCertificate()), attributeName, amount)} // Prepare spec and submit spec := &pb.ChaincodeSpec{ @@ -419,12 +419,13 @@ func transferOwnership(owner crypto.Client, ownerCert crypto.CertificateHandler, return err } - args := []string{base64.StdEncoding.EncodeToString(ownerCert.GetCertificate()), + chaincodeInput := &pb.ChaincodeInput{Args: util.ToChaincodeArgs( + "transferOwnership", + base64.StdEncoding.EncodeToString(ownerCert.GetCertificate()), fromAttributes, base64.StdEncoding.EncodeToString(newOwnerCert.GetCertificate()), toAttributes, - amount} - chaincodeInput := &pb.ChaincodeInput{Function: "transferOwnership", Args: args} + amount)} // Prepare spec and submit spec := &pb.ChaincodeSpec{ @@ -466,7 +467,7 @@ func getBalance(accountID string) ([]byte, error) { } func Query(function, accountID string) ([]byte, error) { - chaincodeInput := &pb.ChaincodeInput{Function: function, Args: []string{accountID}} + chaincodeInput := &pb.ChaincodeInput{Args: util.ToChaincodeArgs(function, accountID)} // Prepare spec and submit spec := &pb.ChaincodeSpec{ @@ -526,10 +527,11 @@ func setup() { } func initMembershipSrvc() { - ca.LogInit(ioutil.Discard, os.Stdout, os.Stdout, os.Stderr, os.Stdout) + // ca.LogInit seems to have been removed + //ca.LogInit(ioutil.Discard, os.Stdout, os.Stdout, os.Stderr, os.Stdout) ca.CacheConfiguration() // Cache configuration aca = 
ca.NewACA() - eca = ca.NewECA() + eca = ca.NewECA(aca) tca = ca.NewTCA(eca) tlsca = ca.NewTLSCA(eca) diff --git a/examples/chaincode/go/asset_management_with_roles/asset_management_with_roles.go b/examples/chaincode/go/asset_management_with_roles/asset_management_with_roles.go index 2329f1aedac..b2e2d729a90 100644 --- a/examples/chaincode/go/asset_management_with_roles/asset_management_with_roles.go +++ b/examples/chaincode/go/asset_management_with_roles/asset_management_with_roles.go @@ -114,7 +114,7 @@ func (t *AssetManagementChaincode) assign(stub shim.ChaincodeStubInterface, args callerRole, err := stub.ReadCertAttribute("role") if err != nil { - fmt.Printf("Error reading attribute [%v] \n", err) + fmt.Printf("Error reading attribute 'role' [%v] \n", err) return nil, fmt.Errorf("Failed fetching caller role. Error was [%v]", err) } @@ -237,7 +237,7 @@ func (t *AssetManagementChaincode) Invoke(stub shim.ChaincodeStubInterface) ([]b func (t *AssetManagementChaincode) Query(stub shim.ChaincodeStubInterface) ([]byte, error) { function, args := stub.GetFunctionAndParameters() if function != "query" { - return nil, errors.New("Invalid query function name. Expecting \"query\"") + return nil, errors.New("Invalid query function name. Expecting 'query' but found '" + function + "'") } var err error diff --git a/examples/chaincode/go/asset_management_with_roles/asset_management_with_roles_test.go b/examples/chaincode/go/asset_management_with_roles/asset_management_with_roles_test.go index d90f50ae1ef..7c087bd6502 100644 --- a/examples/chaincode/go/asset_management_with_roles/asset_management_with_roles_test.go +++ b/examples/chaincode/go/asset_management_with_roles/asset_management_with_roles_test.go @@ -366,7 +366,7 @@ func setup() { func initMembershipSrvc() { ca.CacheConfiguration() // Cache configuration aca = ca.NewACA() - eca = ca.NewECA() + eca = ca.NewECA(aca) tca = ca.NewTCA(eca) tlsca = ca.NewTLSCA(eca) diff --git a/examples/chaincode/go/attributes_to_state/attributes_to_state.go b/examples/chaincode/go/attributes_to_state/attributes_to_state.go index 211d0b81500..d33a9327947 100644 --- a/examples/chaincode/go/attributes_to_state/attributes_to_state.go +++ b/examples/chaincode/go/attributes_to_state/attributes_to_state.go @@ -50,7 +50,7 @@ func (t *Attributes2State) setStateToAttributes(stub shim.ChaincodeStubInterface // Init intializes the chaincode by reading the transaction attributes and storing // the attrbute values in the state func (t *Attributes2State) Init(stub shim.ChaincodeStubInterface) ([]byte, error) { - function, args := stub.GetFunctionAndParameters() + _, args := stub.GetFunctionAndParameters() err := t.setStateToAttributes(stub, args) if err != nil { return nil, err diff --git a/examples/chaincode/go/chaincode_example02/chaincode_example02.go b/examples/chaincode/go/chaincode_example02/chaincode_example02.go index a4148f3fb8d..104b75645bb 100644 --- a/examples/chaincode/go/chaincode_example02/chaincode_example02.go +++ b/examples/chaincode/go/chaincode_example02/chaincode_example02.go @@ -113,6 +113,9 @@ func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) ([]byte, erro // Perform the execution X, err = strconv.Atoi(args[2]) + if err != nil { + return nil, errors.New("Invalid transaction amount, expecting a integer value") + } Aval = Aval - X Bval = Bval + X fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval) diff --git a/examples/chaincode/go/chaincode_example04/chaincode_example04.go b/examples/chaincode/go/chaincode_example04/chaincode_example04.go index 
d6aa2fae7ad..6394c3ba0cd 100644 --- a/examples/chaincode/go/chaincode_example04/chaincode_example04.go +++ b/examples/chaincode/go/chaincode_example04/chaincode_example04.go @@ -34,7 +34,7 @@ type SimpleChaincode struct { func (t *SimpleChaincode) GetChaincodeToCall() string { //This is the hashcode for github.com/hyperledger/fabric/core/example/chaincode/chaincode_example02 //if the example is modifed this hashcode will change!! - chainCodeToCall := "dbf03d6840375cf5dd188a745311a34f6c449cfe5e0d5dbfffa1b22106b08bbb64669662684e71d9c1c2f476e5ba24ff35b93314e35b09124ea72c8297480be8" + chainCodeToCall := "d8f2ef95a72aa85b0f92580580176479fcd0874f6b0855ae21b98dcb926353d357e94cc80b5fb9b789d3a0acfdf143166b3bdc3e483feabeb4ab0b0a530b83a6" return chainCodeToCall } diff --git a/examples/chaincode/go/rbac_tcerts_no_attrs/rbac_test.go b/examples/chaincode/go/rbac_tcerts_no_attrs/rbac_test.go index f611b2a814f..fe9affdb6a8 100644 --- a/examples/chaincode/go/rbac_tcerts_no_attrs/rbac_test.go +++ b/examples/chaincode/go/rbac_tcerts_no_attrs/rbac_test.go @@ -25,7 +25,6 @@ import ( "testing" "time" - "io/ioutil" "os" "path/filepath" @@ -35,6 +34,7 @@ import ( "github.com/hyperledger/fabric/core/container" "github.com/hyperledger/fabric/core/crypto" "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/core/util" "github.com/hyperledger/fabric/membersrvc/ca" pb "github.com/hyperledger/fabric/protos" "github.com/op/go-logging" @@ -175,7 +175,7 @@ func deploy(admCert crypto.CertificateHandler) error { spec := &pb.ChaincodeSpec{ Type: 1, ChaincodeID: &pb.ChaincodeID{Name: "mycc"}, - CtorMsg: &pb.ChaincodeInput{Function: "init", Args: []string{}}, + CtorMsg: &pb.ChaincodeInput{Args: util.ToChaincodeArgs("init")}, Metadata: admCert.GetCertificate(), ConfidentialityLevel: pb.ConfidentialityLevel_PUBLIC, } @@ -197,7 +197,7 @@ func deploy(admCert crypto.CertificateHandler) error { ledger, err := ledger.GetLedger() ledger.BeginTxBatch("1") - _, err = chaincode.Execute(ctx, chaincode.GetChain(chaincode.DefaultChain), transaction) + _, _, err = chaincode.Execute(ctx, chaincode.GetChain(chaincode.DefaultChain), transaction) if err != nil { return fmt.Errorf("Error deploying chaincode: %s", err) } @@ -222,7 +222,7 @@ func addRole(admCert crypto.CertificateHandler, idCert crypto.CertificateHandler return err } - chaincodeInput := &pb.ChaincodeInput{Function: "addRole", Args: []string{string(idCert.GetCertificate()), role}} + chaincodeInput := &pb.ChaincodeInput{Args: util.ToChaincodeArgs("addRole", string(idCert.GetCertificate()), role)} chaincodeInputRaw, err := proto.Marshal(chaincodeInput) if err != nil { return err @@ -263,7 +263,7 @@ func addRole(admCert crypto.CertificateHandler, idCert crypto.CertificateHandler ledger, err := ledger.GetLedger() ledger.BeginTxBatch("1") - _, err = chaincode.Execute(ctx, chaincode.GetChain(chaincode.DefaultChain), transaction) + _, _, err = chaincode.Execute(ctx, chaincode.GetChain(chaincode.DefaultChain), transaction) if err != nil { return fmt.Errorf("Error deploying chaincode: %s", err) } @@ -289,7 +289,7 @@ func write(invoker crypto.Client, invokerCert crypto.CertificateHandler, value [ return err } - chaincodeInput := &pb.ChaincodeInput{Function: "write", Args: []string{string(value)}} + chaincodeInput := &pb.ChaincodeInput{Args: util.ToChaincodeArgs("write", string(value))} chaincodeInputRaw, err := proto.Marshal(chaincodeInput) if err != nil { return err @@ -330,7 +330,7 @@ func write(invoker crypto.Client, invokerCert crypto.CertificateHandler, value [ 
ledger, err := ledger.GetLedger() ledger.BeginTxBatch("1") - _, err = chaincode.Execute(ctx, chaincode.GetChain(chaincode.DefaultChain), transaction) + _, _, err = chaincode.Execute(ctx, chaincode.GetChain(chaincode.DefaultChain), transaction) if err != nil { return fmt.Errorf("Error deploying chaincode: %s", err) } @@ -357,7 +357,7 @@ func read(invoker crypto.Client, invokerCert crypto.CertificateHandler) ([]byte, return nil, err } - chaincodeInput := &pb.ChaincodeInput{Function: "read", Args: []string{}} + chaincodeInput := &pb.ChaincodeInput{Args: util.ToChaincodeArgs("read")} chaincodeInputRaw, err := proto.Marshal(chaincodeInput) if err != nil { return nil, err @@ -398,7 +398,7 @@ func read(invoker crypto.Client, invokerCert crypto.CertificateHandler) ([]byte, ledger, err := ledger.GetLedger() ledger.BeginTxBatch("1") - result, err := chaincode.Execute(ctx, chaincode.GetChain(chaincode.DefaultChain), transaction) + result, _, err := chaincode.Execute(ctx, chaincode.GetChain(chaincode.DefaultChain), transaction) if err != nil { return nil, fmt.Errorf("Error deploying chaincode: %s", err) } @@ -440,9 +440,8 @@ func setup() { } func initMemershipServices() { - ca.LogInit(ioutil.Discard, os.Stdout, os.Stdout, os.Stderr, os.Stdout) ca.CacheConfiguration() // Cache configuration - eca = ca.NewECA() + eca = ca.NewECA(nil) tca = ca.NewTCA(eca) tlsca = ca.NewTLSCA(eca) diff --git a/examples/chaincode/go/utxo/Dockerfile b/examples/chaincode/go/utxo/Dockerfile index 927b746931c..807632d9c30 100644 --- a/examples/chaincode/go/utxo/Dockerfile +++ b/examples/chaincode/go/utxo/Dockerfile @@ -1,4 +1,5 @@ -from hyperledger/fabric-baseimage +# FIXME: someone from the UTXO team will need to verify or rework this +FROM ubuntu:latest RUN apt-get update && apt-get install pkg-config autoconf libtool -y RUN cd /tmp && git clone https://github.com/bitcoin/secp256k1.git && cd secp256k1/ diff --git a/examples/chaincode/go/utxo/util/dah.pb.go b/examples/chaincode/go/utxo/util/dah.pb.go index 0d694eec855..50f1df3b8f4 100644 --- a/examples/chaincode/go/utxo/util/dah.pb.go +++ b/examples/chaincode/go/utxo/util/dah.pb.go @@ -22,6 +22,12 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type TX struct { Version uint32 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` LockTime uint32 `protobuf:"varint,2,opt,name=lockTime" json:"lockTime,omitempty"` @@ -31,9 +37,10 @@ type TX struct { Fee uint64 `protobuf:"varint,6,opt,name=fee" json:"fee,omitempty"` } -func (m *TX) Reset() { *m = TX{} } -func (m *TX) String() string { return proto.CompactTextString(m) } -func (*TX) ProtoMessage() {} +func (m *TX) Reset() { *m = TX{} } +func (m *TX) String() string { return proto.CompactTextString(m) } +func (*TX) ProtoMessage() {} +func (*TX) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *TX) GetTxin() []*TX_TXIN { if m != nil { @@ -56,9 +63,10 @@ type TX_TXIN struct { Sequence uint32 `protobuf:"varint,4,opt,name=sequence" json:"sequence,omitempty"` } -func (m *TX_TXIN) Reset() { *m = TX_TXIN{} } -func (m *TX_TXIN) String() string { return proto.CompactTextString(m) } -func (*TX_TXIN) ProtoMessage() {} +func (m *TX_TXIN) Reset() { *m = TX_TXIN{} } +func (m *TX_TXIN) String() string { return proto.CompactTextString(m) } +func (*TX_TXIN) ProtoMessage() {} +func (*TX_TXIN) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } type TX_TXOUT struct { Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` @@ -67,6 +75,37 @@ type TX_TXOUT struct { Quantity uint64 `protobuf:"varint,4,opt,name=quantity" json:"quantity,omitempty"` } -func (m *TX_TXOUT) Reset() { *m = TX_TXOUT{} } -func (m *TX_TXOUT) String() string { return proto.CompactTextString(m) } -func (*TX_TXOUT) ProtoMessage() {} +func (m *TX_TXOUT) Reset() { *m = TX_TXOUT{} } +func (m *TX_TXOUT) String() string { return proto.CompactTextString(m) } +func (*TX_TXOUT) ProtoMessage() {} +func (*TX_TXOUT) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 1} } + +func init() { + proto.RegisterType((*TX)(nil), "util.TX") + proto.RegisterType((*TX_TXIN)(nil), "util.TX.TXIN") + proto.RegisterType((*TX_TXOUT)(nil), "util.TX.TXOUT") +} + +func init() { proto.RegisterFile("dah.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 277 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x54, 0x91, 0xcf, 0x4b, 0xc3, 0x30, + 0x1c, 0xc5, 0x59, 0x9b, 0x56, 0xfd, 0xba, 0x0d, 0xf9, 0x22, 0x12, 0x7a, 0x90, 0x29, 0x1e, 0x76, + 0xea, 0x41, 0xff, 0x09, 0xbd, 0x28, 0x84, 0x0a, 0xbb, 0x76, 0x31, 0xba, 0x68, 0x6d, 0xb6, 0xfc, + 0x18, 0xf5, 0xea, 0x5f, 0x6e, 0x92, 0x76, 0xa3, 0x42, 0x0f, 0xf9, 0xf4, 0x3d, 0xde, 0x7b, 0x21, + 0x70, 0xf6, 0x56, 0x6f, 0xca, 0xad, 0x56, 0x56, 0x21, 0x71, 0x56, 0x36, 0xb7, 0xbf, 0x29, 0x24, + 0xd5, 0x0a, 0x29, 0x9c, 0xec, 0x85, 0x36, 0x52, 0xb5, 0x74, 0xb2, 0x98, 0x2c, 0x67, 0xec, 0x80, + 0x58, 0xc0, 0x69, 0xa3, 0xf8, 0x57, 0x25, 0xbf, 0x05, 0x4d, 0xa2, 0x74, 0x64, 0xbc, 0x01, 0x62, + 0x3b, 0xd9, 0xd2, 0x74, 0x91, 0x2e, 0xcf, 0xef, 0x67, 0x65, 0x48, 0x2c, 0xab, 0x95, 0xff, 0x9e, + 0x9e, 0x59, 0x94, 0xf0, 0x0e, 0x32, 0xdb, 0x29, 0x67, 0x29, 0x89, 0x9e, 0xf9, 0xc8, 0xf3, 0xf2, + 0x5a, 0xb1, 0x5e, 0xc4, 0x2b, 0xc8, 0xd7, 0x21, 0xd5, 0xd0, 0xcc, 0xdb, 0xa6, 0x6c, 0x20, 0xbc, + 0x80, 0xf4, 0x5d, 0x08, 0x9a, 0xfb, 0x5e, 0xc2, 0xc2, 0xb1, 0xf8, 0x04, 0x12, 0xd2, 0x71, 0x0e, + 0x89, 0xec, 0x86, 0xad, 0xfe, 0x84, 0xd7, 0x00, 0x46, 0x39, 0xcd, 0xc5, 0x63, 0x6d, 0x36, 0x71, + 0xe8, 0x94, 0x8d, 0xfe, 0x84, 0x06, 0xc3, 0xb5, 0xdc, 0x5a, 0x3f, 0x36, 0x68, 0x03, 0x85, 0xeb, + 0x19, 0xb1, 0x73, 0xa2, 0xe5, 0xc2, 0x4f, 0x8c, 0xd7, 
0x3b, 0x70, 0xf1, 0x01, 0x59, 0x5c, 0x89, + 0x97, 0x90, 0xed, 0xeb, 0xc6, 0x89, 0xd8, 0x47, 0x58, 0x0f, 0xa3, 0xc8, 0xe4, 0x5f, 0xa4, 0x77, + 0x73, 0xd5, 0x28, 0x3d, 0x34, 0xf5, 0x10, 0x8a, 0x76, 0xae, 0x6e, 0xad, 0xb4, 0x3f, 0xb1, 0x88, + 0xb0, 0x23, 0xaf, 0xf3, 0xf8, 0x22, 0x0f, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x96, 0x91, 0x0f, + 0xdd, 0x9e, 0x01, 0x00, 0x00, +} diff --git a/examples/events/block-listener/block-listener.go b/examples/events/block-listener/block-listener.go index 30405eb142f..d7dae16e8b8 100644 --- a/examples/events/block-listener/block-listener.go +++ b/examples/events/block-listener/block-listener.go @@ -57,16 +57,17 @@ func (a *adapter) Recv(msg *pb.Event) (bool, error) { a.notfy <- o return true, nil } - if o, e := msg.Event.(*pb.Event_Rejection); e && a.listenToRejections { - a.rejected <- o + if o, e := msg.Event.(*pb.Event_Rejection); e { + if a.listenToRejections { + a.rejected <- o + } return true, nil } if o, e := msg.Event.(*pb.Event_ChaincodeEvent); e { a.cEvent <- o return true, nil } - a.notfy <- nil - return false, nil + return false, fmt.Errorf("Receive unkown type event: %v", msg) } //Disconnected implements consumer.EventAdapter interface for disconnecting diff --git a/examples/sdk/node/Dockerfile b/examples/sdk/node/Dockerfile new file mode 100644 index 00000000000..0d2218dfe00 --- /dev/null +++ b/examples/sdk/node/Dockerfile @@ -0,0 +1,11 @@ +FROM hyperledger/fabric-peer:latest +# setup the chaincode sample +WORKDIR $GOPATH/src/github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 +RUN go build +# build the node SDK +WORKDIR $GOPATH/src/github.com/hyperledger/fabric/sdk/node +RUN make all +# now switch to the sample node app location when the shell is opened in the docker +WORKDIR $GOPATH/src/github.com/hyperledger/fabric/examples/sdk/node +# install the hfc locally for use by the application +RUN npm install $GOPATH/src/github.com/hyperledger/fabric/sdk/node diff --git a/examples/sdk/node/app.js b/examples/sdk/node/app.js new file mode 100644 index 00000000000..dfc5f401025 --- /dev/null +++ b/examples/sdk/node/app.js @@ -0,0 +1,163 @@ +/* + Copyright IBM Corp 2016 All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + * A simple application utilizing the Node.js Client SDK to: + * 1) Enroll a user + * 2) User deploys chaincode + * 3) User queries chaincode + */ +// "HFC" stands for "Hyperledger Fabric Client" +var hfc = require("hfc"); + +console.log(" **** starting HFC sample ****"); + + +// get the addresses from the docker-compose environment +var PEER_ADDRESS = process.env.CORE_PEER_ADDRESS; +var MEMBERSRVC_ADDRESS = process.env.MEMBERSRVC_ADDRESS; + +var chain, chaincodeID; + +// Create a chain object used to interact with the chain. +// You can name it anything you want as it is only used by client. 
+chain = hfc.newChain("mychain"); +// Initialize the place to store sensitive private key information +chain.setKeyValStore( hfc.newFileKeyValStore('/tmp/keyValStore') ); +// Set the URL to membership services and to the peer +console.log("member services address ="+MEMBERSRVC_ADDRESS); +console.log("peer address ="+PEER_ADDRESS); +chain.setMemberServicesUrl("grpc://"+MEMBERSRVC_ADDRESS); +chain.addPeer("grpc://"+PEER_ADDRESS); + +// The following is required when the peer is started in dev mode +// (i.e. with the '--peer-chaincodedev' option) +var mode = process.env['DEPLOY_MODE']; +console.log("DEPLOY_MODE=" + mode); +if (mode === 'dev') { + chain.setDevMode(true); + //Deploy will not take long as the chain should already be running + chain.setDeployWaitTime(10); +} else { + chain.setDevMode(false); + //Deploy will take much longer in network mode + chain.setDeployWaitTime(120); +} + + +chain.setInvokeWaitTime(10); + +// Begin by enrolling the user +enroll(); + +// Enroll a user. +function enroll() { + console.log("enrolling user admin ..."); + // Enroll "admin" which is preregistered in the membersrvc.yaml + chain.enroll("admin", "Xurw3yU9zI0l", function(err, admin) { + if (err) { + console.log("ERROR: failed to register admin: %s",err); + process.exit(1); + } + // Set this user as the chain's registrar which is authorized to register other users. + chain.setRegistrar(admin); + + var userName = "JohnDoe"; + // registrationRequest + var registrationRequest = { + enrollmentID: userName, + affiliation: "bank_a" + }; + chain.registerAndEnroll(registrationRequest, function(error, user) { + if (error) throw Error(" Failed to register and enroll " + userName + ": " + error); + console.log("Enrolled %s successfully\n", userName); + deploy(user); + }); + }); +} + +// Deploy chaincode +function deploy(user) { + console.log("deploying chaincode; please wait ..."); + // Construct the deploy request + var deployRequest = { + chaincodeName: process.env.CORE_CHAINCODE_ID_NAME, + fcn: "init", + args: ["a", "100", "b", "200"] + }; + // where is the chain code, ignored in dev mode + deployRequest.chaincodePath = "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"; + + // Issue the deploy request and listen for events + var tx = user.deploy(deployRequest); + tx.on('complete', function(results) { + // Deploy request completed successfully + console.log("deploy complete; results: %j",results); + // Set the testChaincodeID for subsequent tests + chaincodeID = results.chaincodeID; + invoke(user); + }); + tx.on('error', function(error) { + console.log("Failed to deploy chaincode: request=%j, error=%k",deployRequest,error); + process.exit(1); + }); + +} + +// Query chaincode +function query(user) { + console.log("querying chaincode ..."); + // Construct a query request + var queryRequest = { + chaincodeID: chaincodeID, + fcn: "query", + args: ["a"] + }; + // Issue the query request and listen for events + var tx = user.query(queryRequest); + tx.on('complete', function (results) { + console.log("query completed successfully; results=%j",results); + process.exit(0); + }); + tx.on('error', function (error) { + console.log("Failed to query chaincode: request=%j, error=%k",queryRequest,error); + process.exit(1); + }); +} + +//Invoke chaincode +function invoke(user) { + console.log("invoke chaincode ..."); + // Construct a query request + var invokeRequest = { + chaincodeID: chaincodeID, + fcn: "invoke", + args: ["a", "b", "1"] + }; + // Issue the invoke request and listen for events + var tx = 
user.invoke(invokeRequest); + tx.on('submitted', function (results) { + console.log("invoke submitted successfully; results=%j",results); + }); + tx.on('complete', function (results) { + console.log("invoke completed successfully; results=%j",results); + query(user); + }); + tx.on('error', function (error) { + console.log("Failed to invoke chaincode: request=%j, error=%k",invokeRequest,error); + process.exit(1); + }); +} diff --git a/examples/sdk/node/docker-compose.yml b/examples/sdk/node/docker-compose.yml new file mode 100644 index 00000000000..64302ade722 --- /dev/null +++ b/examples/sdk/node/docker-compose.yml @@ -0,0 +1,50 @@ +membersrvc: + # try 'docker ps' to see the container status after starting this compose + container_name: membersrvc + image: hyperledger/fabric-membersrvc + command: membersrvc + +peer: + container_name: peer + image: hyperledger/fabric-peer + environment: + - CORE_PEER_ADDRESSAUTODETECT=true + - CORE_VM_ENDPOINT=unix:///var/run/docker.sock + - CORE_LOGGING_LEVEL=DEBUG + - CORE_PEER_ID=vp0 + - CORE_SECURITY_ENABLED=true + - CORE_PEER_PKI_ECA_PADDR=membersrvc:7054 + - CORE_PEER_PKI_TCA_PADDR=membersrvc:7054 + - CORE_PEER_PKI_TLSCA_PADDR=membersrvc:7054 + - CORE_PEER_VALIDATOR_CONSENSUS_PLUGIN=noops + # this gives access to the docker host daemon to deploy chain code in network mode + volumes: + - /var/run/docker.sock:/var/run/docker.sock + # have the peer wait 10 sec for membersrvc to start + # the following is to run the peer in Developer mode - also set sample DEPLOY_MODE=dev + command: sh -c "sleep 10; peer node start --peer-chaincodedev" + #command: sh -c "sleep 10; peer node start" + links: + - membersrvc + +starter: + container_name: starter + image: hyperledger/fabric-starter-kit + volumes: + # tweak this to map a local developmnt directory tree into the container + - ~/mytest:/user/mytest + environment: + - MEMBERSRVC_ADDRESS=membersrvc:7054 + - PEER_ADDRESS=peer:7051 + - KEY_VALUE_STORE=/tmp/hl_sdk_node_key_value_store + # set to following to 'dev' if peer running in Developer mode + - DEPLOY_MODE=dev + - CORE_CHAINCODE_ID_NAME=mycc + - CORE_PEER_ADDRESS=peer:7051 + # the following command will start the chain code when this container starts and ready it for deployment by the app + command: sh -c "sleep 20; /opt/gopath/src/github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02/chaincode_example02" + stdin_open: true + tty: true + links: + - membersrvc + - peer diff --git a/examples/sdk/node/web-app.js b/examples/sdk/node/web-app.js new file mode 100644 index 00000000000..6dff8f41719 --- /dev/null +++ b/examples/sdk/node/web-app.js @@ -0,0 +1,118 @@ +/* + Copyright IBM Corp 2016 All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/** + * This example shows how to do the following in a web app. + * 1) At initialization time, enroll the web app with the blockchain. + * The identity must have already been registered. 
+ * 2) At run time, after a user has authenticated with the web app: + * a) register and enroll an identity for the user; + * b) use this identity to deploy, query, and invoke a chaincode. + */ +var hfc = require('hfc'); + +//get the addresses from the docker-compose environment +var PEER_ADDRESS = process.env.PEER_ADDRESS; +var MEMBERSRVC_ADDRESS = process.env.MEMBERSRVC_ADDRESS; + +// Create a client chain. +// The name can be anything as it is only used internally. +var chain = hfc.newChain("targetChain"); + +// Configure the KeyValStore which is used to store sensitive keys +// as so it is important to secure this storage. +// The FileKeyValStore is a simple file-based KeyValStore, but you +// can easily implement your own to store whereever you want. +// To work correctly in a cluster, the file-based KeyValStore must +// either be on a shared file system shared by all members of the cluster +// or you must implement you own KeyValStore which all members of the +// cluster can share. +chain.setKeyValStore( hfc.newFileKeyValStore('/tmp/keyValStore') ); + +// Set the URL for membership services +chain.setMemberServicesUrl("grpc://MEMBERSRVC_ADDRESS"); + +// Add at least one peer's URL. If you add multiple peers, it will failover +// to the 2nd if the 1st fails, to the 3rd if both the 1st and 2nd fails, etc. +chain.addPeer("grpc://PEER_ADDRESS"); + +// Enroll "WebAppAdmin" which is already registered because it is +// listed in fabric/membersrvc/membersrvc.yaml with its one time password. +// If "WebAppAdmin" has already been registered, this will still succeed +// because it stores the state in the KeyValStore +// (i.e. in '/tmp/keyValStore' in this sample). +chain.enroll("WebAppAdmin", "DJY27pEnl16d", function(err, webAppAdmin) { + if (err) return console.log("ERROR: failed to register %s: %s",err); + // Successfully enrolled WebAppAdmin during initialization. + // Set this user as the chain's registrar which is authorized to register other users. + chain.setRegistrar(webAppAdmin); + // Now begin listening for web app requests + listenForUserRequests(); +}); + +// Main web app function to listen for and handle requests. This is specific to +// your application but is provided here to demonstrate the pattern. +function listenForUserRequests() { + for (;;) { + // WebApp-specific logic goes here to await the next request. + // ... + // Assume that we received a request from an authenticated user + // and have 'userName' and 'userAccount'. + // Then determined that we need to invoke the chaincode + // with 'chaincodeID' and function named 'fcn' with arguments 'args'. + handleUserRequest(userName,userAccount,chaincodeID,fcn,args); + } +} + +// Handle a user request +function handleUserRequest(userName, userAccount, chaincodeID, fcn, args) { + // Register and enroll this user. + // If this user has already been registered and/or enrolled, this will + // still succeed because the state is kept in the KeyValStore + // (i.e. in '/tmp/keyValStore' in this sample). 
+ var registrationRequest = { + roles: [ 'client' ], + enrollmentID: userName, + affiliation: "bank_a", + attributes: [{name:'role',value:'client'},{name:'account',value:userAccount}] + }; + chain.registerAndEnroll( registrationRequest, function(err, user) { + if (err) return console.log("ERROR: %s",err); + // Issue an invoke request + var invokeRequest = { + // Name (hash) required for invoke + chaincodeID: chaincodeID, + // Function to trigger + fcn: fcn, + // Parameters for the invoke function + args: args + }; + // Invoke the request from the user object and wait for events to occur. + var tx = user.invoke(invokeRequest); + // Listen for the 'submitted' event + tx.on('submitted', function(results) { + console.log("submitted invoke: %j",results); + }); + // Listen for the 'complete' event. + tx.on('complete', function(results) { + console.log("completed invoke: %j",results); + }); + // Listen for the 'error' event. + tx.on('error', function(err) { + console.log("error on invoke: %j",err); + }); + }); +} \ No newline at end of file diff --git a/gossip/proto/message.pb.go b/gossip/proto/message.pb.go index ffbdd5514a0..72f311ab4a4 100644 --- a/gossip/proto/message.pb.go +++ b/gossip/proto/message.pb.go @@ -42,6 +42,12 @@ var _ = proto1.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto1.ProtoPackageIsVersion2 // please upgrade the proto package + type DataMessage_Type int32 const ( @@ -70,6 +76,7 @@ var DataMessage_Type_value = map[string]int32{ func (x DataMessage_Type) String() string { return proto1.EnumName(DataMessage_Type_name, int32(x)) } +func (DataMessage_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } type GossipMessage struct { Nonce uint64 `protobuf:"varint,1,opt,name=nonce" json:"nonce,omitempty"` @@ -89,9 +96,10 @@ type GossipMessage struct { Content isGossipMessage_Content `protobuf_oneof:"content"` } -func (m *GossipMessage) Reset() { *m = GossipMessage{} } -func (m *GossipMessage) String() string { return proto1.CompactTextString(m) } -func (*GossipMessage) ProtoMessage() {} +func (m *GossipMessage) Reset() { *m = GossipMessage{} } +func (m *GossipMessage) String() string { return proto1.CompactTextString(m) } +func (*GossipMessage) ProtoMessage() {} +func (*GossipMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } type isGossipMessage_Content interface { isGossipMessage_Content() @@ -239,8 +247,8 @@ func (m *GossipMessage) GetChallengeResp() *ConnResponse { } // XXX_OneofFuncs is for the internal use of the proto package. 
-func (*GossipMessage) XXX_OneofFuncs() (func(msg proto1.Message, b *proto1.Buffer) error, func(msg proto1.Message, tag, wire int, b *proto1.Buffer) (bool, error), []interface{}) { - return _GossipMessage_OneofMarshaler, _GossipMessage_OneofUnmarshaler, []interface{}{ +func (*GossipMessage) XXX_OneofFuncs() (func(msg proto1.Message, b *proto1.Buffer) error, func(msg proto1.Message, tag, wire int, b *proto1.Buffer) (bool, error), func(msg proto1.Message) (n int), []interface{}) { + return _GossipMessage_OneofMarshaler, _GossipMessage_OneofUnmarshaler, _GossipMessage_OneofSizer, []interface{}{ (*GossipMessage_AliveMsg)(nil), (*GossipMessage_MemReq)(nil), (*GossipMessage_MemRes)(nil), @@ -431,47 +439,123 @@ func _GossipMessage_OneofUnmarshaler(msg proto1.Message, tag, wire int, b *proto } } +func _GossipMessage_OneofSizer(msg proto1.Message) (n int) { + m := msg.(*GossipMessage) + // content + switch x := m.Content.(type) { + case *GossipMessage_AliveMsg: + s := proto1.Size(x.AliveMsg) + n += proto1.SizeVarint(2<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case *GossipMessage_MemReq: + s := proto1.Size(x.MemReq) + n += proto1.SizeVarint(3<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case *GossipMessage_MemRes: + s := proto1.Size(x.MemRes) + n += proto1.SizeVarint(4<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case *GossipMessage_DataMsg: + s := proto1.Size(x.DataMsg) + n += proto1.SizeVarint(5<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case *GossipMessage_Hello: + s := proto1.Size(x.Hello) + n += proto1.SizeVarint(6<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case *GossipMessage_DataDig: + s := proto1.Size(x.DataDig) + n += proto1.SizeVarint(7<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case *GossipMessage_DataReq: + s := proto1.Size(x.DataReq) + n += proto1.SizeVarint(8<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case *GossipMessage_DataUpdate: + s := proto1.Size(x.DataUpdate) + n += proto1.SizeVarint(9<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case *GossipMessage_AckMsg: + s := proto1.Size(x.AckMsg) + n += proto1.SizeVarint(10<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case *GossipMessage_Empty: + s := proto1.Size(x.Empty) + n += proto1.SizeVarint(11<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case *GossipMessage_Challenge: + s := proto1.Size(x.Challenge) + n += proto1.SizeVarint(12<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case *GossipMessage_ChallengeResp: + s := proto1.Size(x.ChallengeResp) + n += proto1.SizeVarint(13<<3 | proto1.WireBytes) + n += proto1.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + type ConnChallenge struct { Nonce uint64 `protobuf:"varint,1,opt,name=nonce" json:"nonce,omitempty"` } -func (m *ConnChallenge) Reset() { *m = ConnChallenge{} } -func (m *ConnChallenge) String() string { return proto1.CompactTextString(m) } -func (*ConnChallenge) ProtoMessage() {} +func (m *ConnChallenge) Reset() { *m = ConnChallenge{} } +func (m *ConnChallenge) String() string { return proto1.CompactTextString(m) } +func (*ConnChallenge) ProtoMessage() {} +func (*ConnChallenge) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } type ConnResponse struct { PkiID []byte 
`protobuf:"bytes,1,opt,name=pkiID,proto3" json:"pkiID,omitempty"` ChallengeResponse []byte `protobuf:"bytes,2,opt,name=challengeResponse,proto3" json:"challengeResponse,omitempty"` } -func (m *ConnResponse) Reset() { *m = ConnResponse{} } -func (m *ConnResponse) String() string { return proto1.CompactTextString(m) } -func (*ConnResponse) ProtoMessage() {} +func (m *ConnResponse) Reset() { *m = ConnResponse{} } +func (m *ConnResponse) String() string { return proto1.CompactTextString(m) } +func (*ConnResponse) ProtoMessage() {} +func (*ConnResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } type DataRequest struct { Nonce uint64 `protobuf:"varint,1,opt,name=nonce" json:"nonce,omitempty"` - SeqMap []uint64 `protobuf:"varint,2,rep,name=seqMap" json:"seqMap,omitempty"` + SeqMap []uint64 `protobuf:"varint,2,rep,packed,name=seqMap" json:"seqMap,omitempty"` } -func (m *DataRequest) Reset() { *m = DataRequest{} } -func (m *DataRequest) String() string { return proto1.CompactTextString(m) } -func (*DataRequest) ProtoMessage() {} +func (m *DataRequest) Reset() { *m = DataRequest{} } +func (m *DataRequest) String() string { return proto1.CompactTextString(m) } +func (*DataRequest) ProtoMessage() {} +func (*DataRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } type GossipHello struct { Nonce uint64 `protobuf:"varint,1,opt,name=nonce" json:"nonce,omitempty"` } -func (m *GossipHello) Reset() { *m = GossipHello{} } -func (m *GossipHello) String() string { return proto1.CompactTextString(m) } -func (*GossipHello) ProtoMessage() {} +func (m *GossipHello) Reset() { *m = GossipHello{} } +func (m *GossipHello) String() string { return proto1.CompactTextString(m) } +func (*GossipHello) ProtoMessage() {} +func (*GossipHello) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } type DataUpdate struct { Data []*DataMessage `protobuf:"bytes,1,rep,name=data" json:"data,omitempty"` } -func (m *DataUpdate) Reset() { *m = DataUpdate{} } -func (m *DataUpdate) String() string { return proto1.CompactTextString(m) } -func (*DataUpdate) ProtoMessage() {} +func (m *DataUpdate) Reset() { *m = DataUpdate{} } +func (m *DataUpdate) String() string { return proto1.CompactTextString(m) } +func (*DataUpdate) ProtoMessage() {} +func (*DataUpdate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *DataUpdate) GetData() []*DataMessage { if m != nil { @@ -482,21 +566,23 @@ func (m *DataUpdate) GetData() []*DataMessage { type DataDigest struct { Nonce uint64 `protobuf:"varint,1,opt,name=nonce" json:"nonce,omitempty"` - SeqMap []uint64 `protobuf:"varint,2,rep,name=seqMap" json:"seqMap,omitempty"` + SeqMap []uint64 `protobuf:"varint,2,rep,packed,name=seqMap" json:"seqMap,omitempty"` } -func (m *DataDigest) Reset() { *m = DataDigest{} } -func (m *DataDigest) String() string { return proto1.CompactTextString(m) } -func (*DataDigest) ProtoMessage() {} +func (m *DataDigest) Reset() { *m = DataDigest{} } +func (m *DataDigest) String() string { return proto1.CompactTextString(m) } +func (*DataDigest) ProtoMessage() {} +func (*DataDigest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } type DataMessage struct { Type DataMessage_Type `protobuf:"varint,1,opt,name=type,enum=proto.DataMessage_Type" json:"type,omitempty"` Payload *Payload `protobuf:"bytes,2,opt,name=payload" json:"payload,omitempty"` } -func (m *DataMessage) Reset() { *m = DataMessage{} } -func (m *DataMessage) String() string { return proto1.CompactTextString(m) } -func 
(*DataMessage) ProtoMessage() {} +func (m *DataMessage) Reset() { *m = DataMessage{} } +func (m *DataMessage) String() string { return proto1.CompactTextString(m) } +func (*DataMessage) ProtoMessage() {} +func (*DataMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *DataMessage) GetPayload() *Payload { if m != nil { @@ -511,17 +597,19 @@ type Payload struct { Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` } -func (m *Payload) Reset() { *m = Payload{} } -func (m *Payload) String() string { return proto1.CompactTextString(m) } -func (*Payload) ProtoMessage() {} +func (m *Payload) Reset() { *m = Payload{} } +func (m *Payload) String() string { return proto1.CompactTextString(m) } +func (*Payload) ProtoMessage() {} +func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } type AckMessage struct { Nonce uint64 `protobuf:"varint,1,opt,name=nonce" json:"nonce,omitempty"` } -func (m *AckMessage) Reset() { *m = AckMessage{} } -func (m *AckMessage) String() string { return proto1.CompactTextString(m) } -func (*AckMessage) ProtoMessage() {} +func (m *AckMessage) Reset() { *m = AckMessage{} } +func (m *AckMessage) String() string { return proto1.CompactTextString(m) } +func (*AckMessage) ProtoMessage() {} +func (*AckMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } type AliveMessage struct { Membership *Member `protobuf:"bytes,1,opt,name=membership" json:"membership,omitempty"` @@ -529,9 +617,10 @@ type AliveMessage struct { Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` } -func (m *AliveMessage) Reset() { *m = AliveMessage{} } -func (m *AliveMessage) String() string { return proto1.CompactTextString(m) } -func (*AliveMessage) ProtoMessage() {} +func (m *AliveMessage) Reset() { *m = AliveMessage{} } +func (m *AliveMessage) String() string { return proto1.CompactTextString(m) } +func (*AliveMessage) ProtoMessage() {} +func (*AliveMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } func (m *AliveMessage) GetMembership() *Member { if m != nil { @@ -548,22 +637,24 @@ func (m *AliveMessage) GetTimestamp() *PeerTime { } type PeerTime struct { - IncNumber uint64 `protobuf:"varint,1,opt,name=inc_number" json:"inc_number,omitempty"` + IncNumber uint64 `protobuf:"varint,1,opt,name=inc_number,json=incNumber" json:"inc_number,omitempty"` SeqNum uint64 `protobuf:"varint,2,opt,name=seqNum" json:"seqNum,omitempty"` } -func (m *PeerTime) Reset() { *m = PeerTime{} } -func (m *PeerTime) String() string { return proto1.CompactTextString(m) } -func (*PeerTime) ProtoMessage() {} +func (m *PeerTime) Reset() { *m = PeerTime{} } +func (m *PeerTime) String() string { return proto1.CompactTextString(m) } +func (*PeerTime) ProtoMessage() {} +func (*PeerTime) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } type MembershipRequest struct { SelfInformation *AliveMessage `protobuf:"bytes,1,opt,name=selfInformation" json:"selfInformation,omitempty"` Known []string `protobuf:"bytes,2,rep,name=known" json:"known,omitempty"` } -func (m *MembershipRequest) Reset() { *m = MembershipRequest{} } -func (m *MembershipRequest) String() string { return proto1.CompactTextString(m) } -func (*MembershipRequest) ProtoMessage() {} +func (m *MembershipRequest) Reset() { *m = MembershipRequest{} } +func (m *MembershipRequest) String() string { return proto1.CompactTextString(m) } +func (*MembershipRequest) ProtoMessage() {} +func (*MembershipRequest) 
Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *MembershipRequest) GetSelfInformation() *AliveMessage { if m != nil { @@ -577,9 +668,10 @@ type MembershipResponse struct { Dead []*AliveMessage `protobuf:"bytes,2,rep,name=dead" json:"dead,omitempty"` } -func (m *MembershipResponse) Reset() { *m = MembershipResponse{} } -func (m *MembershipResponse) String() string { return proto1.CompactTextString(m) } -func (*MembershipResponse) ProtoMessage() {} +func (m *MembershipResponse) Reset() { *m = MembershipResponse{} } +func (m *MembershipResponse) String() string { return proto1.CompactTextString(m) } +func (*MembershipResponse) ProtoMessage() {} +func (*MembershipResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } func (m *MembershipResponse) GetAlive() []*AliveMessage { if m != nil { @@ -602,18 +694,36 @@ type Member struct { PkiID []byte `protobuf:"bytes,4,opt,name=pkiID,proto3" json:"pkiID,omitempty"` } -func (m *Member) Reset() { *m = Member{} } -func (m *Member) String() string { return proto1.CompactTextString(m) } -func (*Member) ProtoMessage() {} +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto1.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } type Empty struct { } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto1.CompactTextString(m) } -func (*Empty) ProtoMessage() {} +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto1.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } func init() { + proto1.RegisterType((*GossipMessage)(nil), "proto.GossipMessage") + proto1.RegisterType((*ConnChallenge)(nil), "proto.ConnChallenge") + proto1.RegisterType((*ConnResponse)(nil), "proto.ConnResponse") + proto1.RegisterType((*DataRequest)(nil), "proto.DataRequest") + proto1.RegisterType((*GossipHello)(nil), "proto.GossipHello") + proto1.RegisterType((*DataUpdate)(nil), "proto.DataUpdate") + proto1.RegisterType((*DataDigest)(nil), "proto.DataDigest") + proto1.RegisterType((*DataMessage)(nil), "proto.DataMessage") + proto1.RegisterType((*Payload)(nil), "proto.Payload") + proto1.RegisterType((*AckMessage)(nil), "proto.AckMessage") + proto1.RegisterType((*AliveMessage)(nil), "proto.AliveMessage") + proto1.RegisterType((*PeerTime)(nil), "proto.PeerTime") + proto1.RegisterType((*MembershipRequest)(nil), "proto.MembershipRequest") + proto1.RegisterType((*MembershipResponse)(nil), "proto.MembershipResponse") + proto1.RegisterType((*Member)(nil), "proto.Member") + proto1.RegisterType((*Empty)(nil), "proto.Empty") proto1.RegisterEnum("proto.DataMessage_Type", DataMessage_Type_name, DataMessage_Type_value) } @@ -621,6 +731,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion3 + // Client API for Gossip service type GossipClient interface { @@ -713,16 +827,22 @@ func (x *gossipGossipStreamServer) Recv() (*GossipMessage, error) { return m, nil } -func _Gossip_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Gossip_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(GossipServer).Ping(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(GossipServer).Ping(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Gossip/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GossipServer).Ping(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) } var _Gossip_serviceDesc = grpc.ServiceDesc{ @@ -742,4 +862,63 @@ var _Gossip_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, + Metadata: fileDescriptor0, +} + +func init() { proto1.RegisterFile("message.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 823 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x95, 0xcb, 0x73, 0x3a, 0x45, + 0x10, 0xc7, 0x79, 0xec, 0x02, 0xdb, 0x40, 0x42, 0xda, 0x94, 0xae, 0x29, 0xad, 0xb2, 0xd6, 0x57, + 0x34, 0x4a, 0x29, 0xc9, 0xc9, 0x94, 0x55, 0x12, 0x40, 0x43, 0x19, 0x30, 0x4e, 0x12, 0xaf, 0xa9, + 0x0d, 0x4c, 0x60, 0x2b, 0xec, 0xec, 0x86, 0x5d, 0xb4, 0x72, 0xf5, 0xe8, 0x9f, 0xe2, 0x5f, 0xe9, + 0xbc, 0xf6, 0x25, 0x70, 0xf8, 0x5d, 0x92, 0xed, 0xee, 0xcf, 0x77, 0xa6, 0x7b, 0xa6, 0xa7, 0x81, + 0xb6, 0x4f, 0xa3, 0xc8, 0x5d, 0xd0, 0x6e, 0xb8, 0x0e, 0xe2, 0x00, 0x4d, 0xf9, 0xcf, 0xf9, 0xdb, + 0x84, 0xf6, 0x2f, 0x41, 0x14, 0x79, 0xe1, 0x44, 0x85, 0xf1, 0x18, 0x4c, 0x16, 0xb0, 0x19, 0xb5, + 0xcb, 0x9f, 0x94, 0x4f, 0x0d, 0xa2, 0x0c, 0xfc, 0x1e, 0x1a, 0xee, 0xca, 0xfb, 0x93, 0x4e, 0xa2, + 0x85, 0x5d, 0xe1, 0x81, 0x66, 0xef, 0x3d, 0xb5, 0x50, 0xb7, 0x2f, 0xdd, 0x4a, 0x7c, 0x5d, 0x22, + 0x29, 0x86, 0x3d, 0xa8, 0xf9, 0xd4, 0x27, 0xf4, 0xd5, 0xae, 0x4a, 0x81, 0xad, 0x05, 0x13, 0xea, + 0x3f, 0xd1, 0x75, 0xb4, 0xf4, 0x42, 0x1e, 0xdb, 0xd0, 0x28, 0xe6, 0x2a, 0x4d, 0xe2, 0xb9, 0xd6, + 0x44, 0xb6, 0x21, 0x35, 0x1f, 0xee, 0xd0, 0x44, 0x61, 0xc0, 0x22, 0x9a, 0x8a, 0x22, 0xec, 0x42, + 0x7d, 0xee, 0xc6, 0xae, 0x48, 0xcd, 0x94, 0x2a, 0xd4, 0xaa, 0xa1, 0xf0, 0xa6, 0x99, 0x25, 0x10, + 0x7e, 0x0d, 0xe6, 0x92, 0xae, 0x56, 0x81, 0x5d, 0x2b, 0xd0, 0xea, 0x18, 0xae, 0x45, 0x84, 0xd3, + 0x0a, 0xc1, 0x6f, 0xd5, 0xda, 0x43, 0x6f, 0x61, 0xd7, 0x25, 0x7d, 0x94, 0x5b, 0x9b, 0x7b, 0x55, + 0xfa, 0x09, 0x93, 0xa4, 0x22, 0x8a, 0x6e, 0x6c, 0xa5, 0x92, 0x95, 0x9b, 0x40, 0xbc, 0x5e, 0x10, + 0x9f, 0x0f, 0x21, 0xff, 0x4b, 0x6d, 0x6b, 0x6b, 0x07, 0x15, 0xe0, 0x8a, 0x1c, 0x86, 0x67, 0x50, + 0x73, 0x67, 0x2f, 0xa2, 0x5c, 0x28, 0x08, 0xfa, 0xdc, 0x99, 0x56, 0xab, 0x11, 0xfc, 0x0c, 0x4c, + 0xea, 0x87, 0xf1, 0x9b, 0xdd, 0x94, 0x6c, 0x4b, 0xb3, 0x23, 0xe1, 0x13, 0x65, 0xca, 0x20, 0x5e, + 0x80, 0x35, 0x5b, 0xba, 0xab, 0x15, 0x65, 0x0b, 0x6a, 0xb7, 0x24, 0x79, 0xac, 0xc9, 0x41, 0xc0, + 0xd8, 0x20, 0x89, 0x71, 0x45, 0x06, 0xe2, 0x25, 0xb4, 0x53, 0x43, 0xdc, 0x8b, 0xdd, 0x2e, 0x74, + 0x86, 0x50, 0xe6, 0xae, 0xab, 0xc8, 0x5e, 0x59, 0x50, 0x9f, 0x05, 0x2c, 0xa6, 0x2c, 0x76, 0x3e, + 0x87, 0x76, 0x61, 0x97, 0xdd, 0x3d, 0xe8, 0x10, 0x68, 0xe5, 0x97, 0x14, 0x54, 
0xf8, 0xe2, 0x8d, + 0x87, 0x92, 0x6a, 0x11, 0x65, 0xe0, 0x37, 0x70, 0x54, 0xd8, 0x48, 0xa0, 0xb2, 0x65, 0x5b, 0x64, + 0x3b, 0xe0, 0x5c, 0x42, 0x33, 0x77, 0x35, 0x7b, 0x9a, 0xff, 0x7d, 0xa8, 0x45, 0xf4, 0x75, 0xe2, + 0x86, 0x7c, 0x9d, 0x2a, 0x77, 0x6b, 0xcb, 0xf9, 0x14, 0x9a, 0xb9, 0xa6, 0xd9, 0x93, 0xf5, 0x05, + 0x40, 0x76, 0x93, 0xf8, 0x05, 0x18, 0xe2, 0x26, 0x39, 0x52, 0xdd, 0xdd, 0xa8, 0x44, 0xc6, 0x9d, + 0x1f, 0x94, 0x4a, 0x75, 0xd8, 0x3b, 0xa6, 0xf5, 0x6f, 0x59, 0x15, 0x95, 0xbc, 0xe8, 0x33, 0x30, + 0xe2, 0xb7, 0x50, 0x89, 0x0f, 0x7a, 0x1f, 0x6c, 0xef, 0xd9, 0xbd, 0xe7, 0x61, 0x22, 0x21, 0x3c, + 0x85, 0x7a, 0xe8, 0xbe, 0xad, 0x02, 0x77, 0xae, 0xdf, 0xf9, 0x81, 0xe6, 0x6f, 0x95, 0x97, 0x24, + 0x61, 0x67, 0x08, 0x86, 0xd0, 0x61, 0x1b, 0xac, 0x87, 0xe9, 0x70, 0xf4, 0xf3, 0x78, 0x3a, 0x1a, + 0x76, 0x4a, 0x68, 0x81, 0x79, 0x75, 0xf3, 0xdb, 0xe0, 0xd7, 0x4e, 0x19, 0x9b, 0x50, 0xff, 0xa3, + 0x7f, 0xf3, 0x48, 0x46, 0xbf, 0x77, 0x2a, 0x99, 0x71, 0xd7, 0xa9, 0x62, 0x03, 0x8c, 0xc1, 0x88, + 0xdc, 0x77, 0x0c, 0x67, 0x0c, 0x75, 0xbd, 0xb2, 0xae, 0x67, 0xba, 0xf1, 0x75, 0x99, 0xda, 0x42, + 0x04, 0x63, 0xe9, 0x46, 0x4b, 0x99, 0x8f, 0x45, 0xe4, 0xb7, 0xf0, 0xc9, 0x73, 0xac, 0xca, 0x8b, + 0x55, 0x67, 0xe6, 0x00, 0x64, 0x4f, 0x60, 0xcf, 0x6d, 0xfc, 0x53, 0x86, 0x56, 0x7e, 0x62, 0xf1, + 0x07, 0x0e, 0x7e, 0x3a, 0x5c, 0x24, 0xdb, 0xec, 0xb5, 0x0b, 0x53, 0x87, 0xe4, 0x00, 0x8e, 0x5b, + 0xb1, 0xc7, 0x27, 0x69, 0xec, 0xfa, 0xa1, 0x3e, 0xa0, 0xc3, 0xe4, 0x80, 0x28, 0x5d, 0xdf, 0xf3, + 0x18, 0xc9, 0x08, 0xfc, 0x08, 0xac, 0xc8, 0x5b, 0x30, 0x37, 0xde, 0xac, 0xa9, 0xce, 0x35, 0x73, + 0x38, 0x7d, 0x68, 0x24, 0x22, 0xfc, 0x18, 0xc0, 0x63, 0xb3, 0x47, 0xb6, 0x11, 0x5b, 0xe9, 0x9c, + 0x2d, 0xee, 0x99, 0x4a, 0x47, 0xee, 0x6c, 0x2a, 0xf9, 0xb3, 0x71, 0x96, 0x70, 0xb4, 0x35, 0x4f, + 0xf1, 0x47, 0x38, 0x8c, 0xe8, 0xea, 0x79, 0xcc, 0x9e, 0x83, 0xb5, 0xef, 0xc6, 0x5e, 0xc0, 0x74, + 0x61, 0xbb, 0x66, 0x36, 0xf9, 0x3f, 0x2b, 0x4e, 0xee, 0x85, 0x05, 0x7f, 0x31, 0xd9, 0x56, 0x16, + 0x51, 0x06, 0xdf, 0x09, 0xb7, 0xa7, 0x30, 0x7e, 0x05, 0xa6, 0x1c, 0xf8, 0xba, 0xa1, 0x77, 0x6e, + 0xa0, 0x08, 0xfc, 0x92, 0x5f, 0x19, 0x95, 0x6d, 0xb5, 0x97, 0x94, 0x80, 0xf3, 0x0c, 0x35, 0xb5, + 0x13, 0x1e, 0x40, 0xc5, 0x9b, 0xcb, 0xdc, 0x2d, 0xc2, 0xbf, 0xf0, 0x04, 0x1a, 0x94, 0xcd, 0xc3, + 0xc0, 0x63, 0xb1, 0xee, 0x86, 0xd4, 0x16, 0x31, 0x9f, 0xc6, 0x6e, 0xae, 0x2b, 0x52, 0x3b, 0x9b, + 0x14, 0x46, 0x6e, 0x52, 0x38, 0x75, 0x30, 0xe5, 0x18, 0xec, 0x85, 0x50, 0x53, 0xef, 0x18, 0x7f, + 0x82, 0x96, 0xfa, 0xba, 0x8b, 0xd7, 0xd4, 0xf5, 0xf1, 0xb8, 0xf0, 0xdb, 0xa0, 0xd3, 0x3c, 0xd9, + 0xe9, 0x75, 0x4a, 0xa7, 0xe5, 0xef, 0xca, 0x7c, 0xde, 0x1a, 0xb7, 0x1e, 0x5b, 0x60, 0x61, 0xd0, + 0x9e, 0x14, 0x2c, 0xa7, 0xf4, 0x54, 0x93, 0xe6, 0xf9, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x55, + 0x49, 0x77, 0x6f, 0x95, 0x07, 0x00, 0x00, } diff --git a/images/app/Dockerfile.in b/images/app/Dockerfile.in index 3cff23f8181..6a971f3a871 100644 --- a/images/app/Dockerfile.in +++ b/images/app/Dockerfile.in @@ -1,4 +1,4 @@ -FROM hyperledger/fabric-src:latest +FROM hyperledger/fabric-src:_TAG_ RUN mkdir -p /var/hyperledger/db COPY bin/* $GOPATH/bin/ WORKDIR $GOPATH/src/github.com/hyperledger/fabric diff --git a/images/base/.gitignore b/images/base/.gitignore deleted file mode 100644 index 94143827ed0..00000000000 --- a/images/base/.gitignore +++ /dev/null @@ -1 +0,0 @@ -Dockerfile diff --git a/images/base/Dockerfile.in b/images/base/Dockerfile.in deleted file mode 100644 index 3babb952831..00000000000 --- a/images/base/Dockerfile.in +++ /dev/null @@ -1,5 +0,0 @@ -FROM 
_DOCKER_BASE_ -COPY scripts /hyperledger/baseimage/scripts -RUN /hyperledger/baseimage/scripts/common/init.sh -RUN /hyperledger/baseimage/scripts/docker/init.sh -RUN /hyperledger/baseimage/scripts/common/setup.sh diff --git a/images/base/Makefile b/images/base/Makefile deleted file mode 100644 index 740274cd045..00000000000 --- a/images/base/Makefile +++ /dev/null @@ -1,49 +0,0 @@ -NAME=hyperledger/fabric-baseimage -VERSION=$(shell cat ./release) -ARCH=$(shell uname -m) -DOCKER_TAG ?= $(ARCH)-$(VERSION) -VAGRANTIMAGE=packer_virtualbox-iso_virtualbox.box - -DOCKER_BASE_x86_64=ubuntu:trusty -DOCKER_BASE_s390x=s390x/ubuntu:xenial - -DOCKER_BASE=$(DOCKER_BASE_$(ARCH)) - -ifeq ($(DOCKER_BASE), ) -$(error "Architecture \"$(ARCH)\" is unsupported") -endif - - -# strips off the post-processors that try to upload artifacts to the cloud -packer-local.json: packer.json - jq 'del(."post-processors"[0][1]) | del(."post-processors"[1][1])' packer.json > $@ - -all: vagrant docker - -$(VAGRANTIMAGE): packer-local.json - BASEIMAGE_RELEASE=$(VERSION) \ - packer build -only virtualbox-iso packer-local.json - -Dockerfile: Dockerfile.in Makefile - @echo "# Generated from Dockerfile.in. DO NOT EDIT!" > $@ - @cat Dockerfile.in | \ - sed -e "s|_DOCKER_BASE_|$(DOCKER_BASE)|" >> $@ - -docker: Dockerfile release - @echo "Generating docker" - @docker build -t $(NAME):$(DOCKER_TAG) . - -vagrant: $(VAGRANTIMAGE) remove release - vagrant box add -name $(NAME) $(VAGRANTIMAGE) - -push: - @echo "You will need your ATLAS_TOKEN set for this to succeed" - packer push -name $(NAME) packer.json - -remove: - -vagrant box remove --box-version 0 $(NAME) - -clean: remove - -rm $(VAGRANTIMAGE) - -rm Dockerfile - -rm packer-local.json diff --git a/images/base/README.md b/images/base/README.md deleted file mode 100644 index 6073455aca5..00000000000 --- a/images/base/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# Baseimage Introduction -This directory contains the infrastructure for creating a new baseimage used as the basis for various functions within the Hyperledger workflow such as our Vagrant based development environment, chaincode compilation/execution, unit-testing, and even cluster simulation. It is based on ubuntu-14.04 with various opensource projects added such as golang, rocksdb, grpc, and node.js. The actual Hyperledger code is injected just-in-time before deployment. The resulting images are published to public repositories such as [atlas.hashicorp.com](https://atlas.hashicorp.com/hyperledger/boxes/fabric-baseimage) for consumption by Vagrant/developers and [hub.docker.com](https://hub.docker.com/r/hyperledger/fabric-baseimage/) for consumption by docker-based workflows. - -![Baseimage Architectural Overview](./images/packer-overview.png) - -The purpose of this baseimage is to act as a bridge between a raw ubuntu/trusty64 configuration and the customizations required for supporting a hyperledger environment. Some of the FOSS components that need to be added to Ubuntu do not have convenient native packages. Therefore, they are built from source. However, the build process is generally expensive (often taking in excess of 30 minutes) so it is fairly inefficient to JIT assemble these components on demand. - -Therefore, the expensive FOSS components are built into this baseimage once and subsequently cached on the public repositories so that workflows may simply consume the objects without requiring a local build cycle. 
- -# Intended Audience -This is only intended for release managers curating the base images on atlas and docker-hub. Typical developers may safely ignore this directory completely. - -Anyone wishing to customize their image is encouraged to do so via downstream means, such as the vagrant infrastructure in the root directory of this project or the Dockerfile. - -## Exceptions - -If a component is found to be both broadly applicable and expensive to build JIT, it may be a candidate for inclusion in a future baseimage. - -# Usage - -## Usage Pattern 1 - Local baseimage builds for testing a proposed change - -* "make vagrant" will build just the vagrant image and install it into the local environment as "hyperledger/fabric-baseimage:v0", making it suitable to local testing. - * To utilize the new base image in your local tests, run `vagrant destroy` then `USE_LOCAL_BASEIMAGE=true vagrant up`, also preface `vagrant ssh` as `USE_LOCAL_BASEIMAGE=true vagrant ssh` or simply export that variable, or Vagrant will fail to find the ssh key. -* "make docker" will build just the docker image and commit it to your local environment as "hyperledger/fabric-baseimage" - -## Usage Pattern 2 - Release manager promoting a new base image to the public repositories - -- Step 1: Decide on the version number to be used and update the packer.json template variables:release -- Step 2: Initiate a build - -Note: You will need credentials to the public repositories, as discussed in Uploading Permissions below. If you do not have these credentials, you are probably not an image release manager. Otherwise, discuss it on the Hyperledger slack to see if you should be added. - -### Hosted Build Method - -"make push" will push the build configuration to atlas for cloud-hosted building of the images. You only need to have the ATLAS_TOKEN defined for this to succeed, as the atlas build server will push the artifacts out to the respective hosts once the build completes. Therefore, the repository credentials are already cached on the build server and you only need credentials for the build-server itself. You can check the status of the build [here](https://atlas.hashicorp.com/hyperledger/build-configurations/baseimage/) - -### Local Build Method - -"make [all]" will generate both a vagrant and docker image and push them out to the cloud. This method requires both ATLAS and DOCKERHUB credentials since the artifacts are pushed directly to the hosting providers from your build machine. - -## Uploading Permissions - -The system relies on several environment variables to establish credentials with the hosting repositories: - -* ATLAS_TOKEN - used to push both vagrant images and packer templates to atlas.hashicorp.com -* DOCKERHUB_[EMAIL|USERNAME|PASSWORD] - used to push docker images to hub.docker.com - -Note that if you only plan on pushing the build to the atlas packer build service, you only need the ATLAS_TOKEN set as the dockerhub interaction will occur from the atlas side of the process where the docker credentials are presumably already configured. - -## Versioning - -Vagrant boxes are only versioned when they are submitted to a repository. Vagrant does not support applying a version to a vagrant box via the `vagrant box add` command. Adding the box gives it an implicit version of 0. Setting `USE_LOCAL_BASEIMAGE=true` in the `vagrant up` command causes the Vagrant file in the the parent directory to pick version 0, instead of the default. 
diff --git a/images/base/http/preseed.cfg b/images/base/http/preseed.cfg deleted file mode 100644 index d3619ce76f7..00000000000 --- a/images/base/http/preseed.cfg +++ /dev/null @@ -1,34 +0,0 @@ -debconf debconf/frontend select Noninteractive -choose-mirror-bin mirror/http/proxy string -d-i clock-setup/utc boolean true -d-i clock-setup/utc-auto boolean true -d-i finish-install/reboot_in_progress note -d-i grub-installer/only_debian boolean true -d-i grub-installer/with_other_os boolean true -d-i partman-auto/choose_recipe select atomic -d-i partman-auto/method string regular -d-i partman/choose_partition select finish -d-i partman/confirm boolean true -d-i partman/confirm_nooverwrite boolean true -d-i partman/confirm_write_new_label boolean true - -# Set the kernel -d-i base-installer/kernel/override-image string linux-virtual - -# Default user -d-i passwd/user-fullname string vagrant -d-i passwd/username string vagrant -d-i passwd/user-password password vagrant -d-i passwd/user-password-again password vagrant -d-i passwd/username string vagrant - -# Minimum packages (see postinstall.sh) -d-i pkgsel/include string openssh-server -d-i pkgsel/install-language-support boolean false -d-i pkgsel/update-policy select none -d-i pkgsel/upgrade select none - -d-i time/zone string UTC -d-i user-setup/allow-password-weak boolean true -d-i user-setup/encrypt-home boolean false -tasksel tasksel/first multiselect standard, server diff --git a/images/base/images/packer-overview.graffle/data.plist b/images/base/images/packer-overview.graffle/data.plist deleted file mode 100644 index c6fc4f1b778..00000000000 --- a/images/base/images/packer-overview.graffle/data.plist +++ /dev/null @@ -1,1383 +0,0 @@ - - - - - ActiveLayerIndex - 0 - ApplicationVersion - - com.omnigroup.OmniGraffle - 139.18.0.187838 - - AutoAdjust - - BackgroundGraphic - - Bounds - {{0, 0}, {575.99998474121094, 1466}} - Class - SolidGraphic - ID - 2 - Style - - shadow - - Draws - NO - - stroke - - Draws - NO - - - - BaseZoom - 0 - CanvasOrigin - {0, 0} - ColumnAlign - 1 - ColumnSpacing - 36 - CreationDate - 2016-02-22 22:11:37 +0000 - Creator - Greg Haskins - DisplayScale - 1 0/72 in = 1.0000 in - GraphDocumentVersion - 8 - GraphicsList - - - Class - LineGraphic - Head - - ID - 22 - - ID - 60 - Points - - {310.14783447722266, 96.412900484504561} - {373.3780487804878, 189} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 4 - - - - Class - LineGraphic - Head - - ID - 57 - - ID - 59 - OrthogonalBarAutomatic - - OrthogonalBarPoint - {0, 0} - OrthogonalBarPosition - 76.855720520019531 - Points - - {245.3613785038917, 96.388558716933716} - {108, 266} - {181, 753} - {314.5000004554858, 752.81861412981596} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - LineType - 2 - TailArrow - 0 - Width - 2 - - - Tail - - ID - 4 - - - - Class - LineGraphic - Head - - ID - 57 - - ID - 58 - Points - - {364.99937473792534, 505.12549205971999} - {364.99580448417515, 724.24999998950307} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 54 - - - - Bounds - {{315, 724.75}, {100, 56}} - Class - ShapedGraphic - ID - 57 - Shape - FlattenedRectangle - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 
\cf0 custom environment} - VerticalPad - 0 - - TextRelativeArea - {{0.10000000000000001, 0}, {0.80000000000000004, 1}} - - - Class - LineGraphic - Head - - ID - 30 - - ID - 56 - Points - - {388.73435760680411, 486.3256814805485} - {439.2620074228966, 528} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 54 - - - - Class - LineGraphic - Head - - ID - 40 - - ID - 55 - Points - - {342.97352711499752, 487.69524871963358} - {300.58818602417603, 528} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 54 - - - - Bounds - {{317.5, 428.875}, {95, 75.75}} - Class - ShapedGraphic - ID - 54 - Shape - Diamond - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 hyperledger fabric} - VerticalPad - 0 - - - - Class - LineGraphic - Head - - ID - 49 - - ID - 53 - Points - - {464.62462462462463, 569.5} - {464.82882888295114, 603.50000901136877} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 30 - - - - Bounds - {{433, 622}, {100, 56}} - Class - ShapedGraphic - ID - 51 - Shape - FlattenedRectangle - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 docker environment} - VerticalPad - 0 - - TextRelativeArea - {{0.10000000000000001, 0}, {0.80000000000000004, 1}} - - - Bounds - {{424, 613}, {100, 56}} - Class - ShapedGraphic - ID - 50 - Shape - FlattenedRectangle - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 docker environment} - VerticalPad - 0 - - TextRelativeArea - {{0.10000000000000001, 0}, {0.80000000000000004, 1}} - - - Bounds - {{415, 604}, {100, 56}} - Class - ShapedGraphic - ID - 49 - Shape - FlattenedRectangle - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 docker environment} - VerticalPad - 0 - - TextRelativeArea - {{0.10000000000000001, 0}, {0.80000000000000004, 1}} - - - Class - LineGraphic - Head - - ID - 30 - - ID - 48 - Points - - {508.88537781669504, 503.46248035776267} - {484.83665338645471, 528} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 46 - - - - Class - LineGraphic - Head - - ID - 30 - - ID - 47 - Points - - {466.75138277659562, 426.49991523085737} - {464.88213627992633, 528} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 35 - - - - Bounds - {{495, 467.375}, {62, 37.25}} - Class - ShapedGraphic - ID - 46 - Shape - Hexagon - Style - - Text - - Text - 
{\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 Dockerfile} - VerticalPad - 0 - - TextRelativeArea - {{0, 0.10000000000000001}, {1, 0.80000000000000004}} - - - Bounds - {{486, 458.375}, {62, 37.25}} - Class - ShapedGraphic - ID - 45 - Shape - Hexagon - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 Dockerfile} - VerticalPad - 0 - - TextRelativeArea - {{0, 0.10000000000000001}, {1, 0.80000000000000004}} - - - Bounds - {{477, 449.375}, {62, 37.25}} - Class - ShapedGraphic - ID - 44 - Shape - Hexagon - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 Dockerfile} - VerticalPad - 0 - - TextRelativeArea - {{0, 0.10000000000000001}, {1, 0.80000000000000004}} - - - Class - LineGraphic - Head - - ID - 24 - - ID - 43 - Points - - {277.52222222222446, 574} - {279.23335524784636, 612.50049307640984} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 40 - - - - Bounds - {{230.5, 613}, {100, 56}} - Class - ShapedGraphic - ID - 24 - Shape - FlattenedRectangle - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 development environment} - VerticalPad - 0 - - TextRelativeArea - {{0.10000000000000001, 0}, {0.80000000000000004, 1}} - - - Class - LineGraphic - Head - - ID - 40 - - ID - 41 - Points - - {276.50000301611595, 426.5} - {276.50000301611595, 528} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 33 - - - - Bounds - {{202, 528}, {149, 46}} - Class - ShapedGraphic - ID - 40 - ImageID - 6 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - - - Bounds - {{436, 326.75}, {62, 46}} - Class - ShapedGraphic - ID - 38 - ImageID - 5 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - - - Class - LineGraphic - Head - - ID - 35 - - ID - 37 - Points - - {467.00000967504548, 372.75} - {467.00000967504548, 399.5} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 38 - - - - Class - LineGraphic - Head - - ID - 33 - - ID - 36 - Points - - {276.49991756300551, 370.5} - {276.49980234985657, 399.5} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 28 - - - - Bounds - {{411, 400}, {112, 26}} - Class - ShapedGraphic - ID - 35 - Shape - Rectangle - Style - - stroke - - CornerRadius - 9 - - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 
Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 docker:hyperledger/fabric-baseimage} - - - - Bounds - {{220.5, 400}, {112, 26}} - Class - ShapedGraphic - ID - 33 - Shape - Rectangle - Style - - stroke - - CornerRadius - 9 - - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 vagrant:hyperledger/fabric-baseimage} - - - - Class - LineGraphic - Head - - ID - 38 - - ID - 32 - Points - - {408.21374764707127, 245} - {454.09227872378818, 326.75} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 22 - - - - Class - LineGraphic - Head - - ID - 28 - - ID - 31 - Points - - {368.03295668549907, 245} - {294.63182674199624, 329} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 22 - - - - Bounds - {{390, 528}, {149, 41.5}} - Class - ShapedGraphic - ID - 30 - ImageID - 4 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - - - Bounds - {{199, 329}, {155, 41.5}} - Class - ShapedGraphic - ID - 28 - ImageID - 3 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - - - Class - LineGraphic - Head - - ID - 22 - - ID - 26 - Points - - {435.32867419331808, 159.90087452433639} - {413.63683982367951, 189} - - Style - - stroke - - HeadArrow - FilledArrow - Legacy - - TailArrow - 0 - Width - 2 - - - Tail - - ID - 20 - - - - Bounds - {{390, 103.5}, {133, 56}} - Class - ShapedGraphic - ID - 20 - Shape - Hexagon - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 hyperledger/\ -fabric-baseimage} - VerticalPad - 0 - - TextRelativeArea - {{0, 0.10000000000000001}, {1, 0.80000000000000004}} - - - Bounds - {{331, 189}, {123, 56}} - Class - ShapedGraphic - ID - 22 - ImageID - 2 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - - - Bounds - {{342, 18}, {73, 59}} - Class - ShapedGraphic - ID - 10 - Shape - Diamond - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 ubuntu} - VerticalPad - 0 - - - - Bounds - {{264, 18}, {73, 59}} - Class - ShapedGraphic - ID - 9 - Shape - Diamond - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 nodejs} - VerticalPad - 0 - - - - Bounds - {{186, 18}, {73, 59}} - Class - ShapedGraphic - ID - 8 - Shape - Diamond - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 
-\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 grpc} - VerticalPad - 0 - - - - Bounds - {{108, 18}, {73, 59}} - Class - ShapedGraphic - ID - 7 - Shape - Diamond - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 golang} - VerticalPad - 0 - - - - Bounds - {{30, 18}, {73, 59}} - Class - ShapedGraphic - ID - 6 - Shape - Diamond - Style - - Text - - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc - -\f0\fs22 \cf0 rocksdb} - VerticalPad - 0 - - - - Bounds - {{12, 10}, {537, 86}} - Class - ShapedGraphic - ID - 4 - Shape - Rectangle - Style - - fill - - Draws - NO - - stroke - - CornerRadius - 9 - Pattern - 1 - - - Text - - Align - 2 - Text - {\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf460 -\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qr - -\f0\fs22 \cf0 Upstream FOSS} - - - - GridInfo - - GuidesLocked - NO - GuidesVisible - YES - HPages - 1 - ImageCounter - 7 - ImageLinkBack - - - - - - - - ImageList - - image6.png - image5.png - image4.png - image3.png - image2.png - - KeepToScale - - Layers - - - Lock - NO - Name - Layer 1 - Print - YES - View - YES - - - LayoutInfo - - Animate - NO - circoMinDist - 18 - circoSeparation - 0.0 - layoutEngine - dot - neatoSeparation - 0.0 - twopiSeparation - 0.0 - - LinksVisible - NO - MagnetsVisible - NO - MasterSheets - - ModificationDate - 2016-04-08 16:20:56 +0000 - Modifier - Greg Haskins - NotesVisible - NO - Orientation - 2 - OriginVisible - NO - PageBreaks - YES - PrintInfo - - NSBottomMargin - - float - 41 - - NSHorizonalPagination - - coded - BAtzdHJlYW10eXBlZIHoA4QBQISEhAhOU051bWJlcgCEhAdOU1ZhbHVlAISECE5TT2JqZWN0AIWEASqEhAFxlwCG - - NSLeftMargin - - float - 18 - - NSPaperSize - - size - {611.99998474121094, 792} - - NSPrintReverseOrientation - - int - 0 - - NSRightMargin - - float - 18 - - NSTopMargin - - float - 18 - - - PrintOnePage - - ReadOnly - NO - RowAlign - 1 - RowSpacing - 36 - SheetTitle - Canvas 1 - SmartAlignmentGuidesActive - YES - SmartDistanceGuidesActive - YES - UniqueID - 1 - UseEntirePage - - VPages - 2 - WindowInfo - - CurrentSheet - 0 - ExpandedCanvases - - - name - Canvas 1 - - - Frame - {{69, 50}, {1406, 1115}} - ListView - - OutlineWidth - 142 - RightSidebar - - ShowRuler - - Sidebar - - SidebarWidth - 120 - VisibleRegion - {{-340, 0}, {1257, 960}} - Zoom - 1 - ZoomValues - - - Canvas 1 - 1 - 1 - - - - - diff --git a/images/base/images/packer-overview.graffle/image2.png b/images/base/images/packer-overview.graffle/image2.png deleted file mode 100644 index 7e5bdd51c01..00000000000 Binary files a/images/base/images/packer-overview.graffle/image2.png and /dev/null differ diff --git a/images/base/images/packer-overview.graffle/image3.png b/images/base/images/packer-overview.graffle/image3.png deleted file mode 100644 index 
437a535b657..00000000000 Binary files a/images/base/images/packer-overview.graffle/image3.png and /dev/null differ diff --git a/images/base/images/packer-overview.graffle/image4.png b/images/base/images/packer-overview.graffle/image4.png deleted file mode 100644 index 045093e2b58..00000000000 Binary files a/images/base/images/packer-overview.graffle/image4.png and /dev/null differ diff --git a/images/base/images/packer-overview.graffle/image5.png b/images/base/images/packer-overview.graffle/image5.png deleted file mode 100644 index 0a367e5d9dc..00000000000 Binary files a/images/base/images/packer-overview.graffle/image5.png and /dev/null differ diff --git a/images/base/images/packer-overview.graffle/image6.png b/images/base/images/packer-overview.graffle/image6.png deleted file mode 100644 index efbf05abd22..00000000000 Binary files a/images/base/images/packer-overview.graffle/image6.png and /dev/null differ diff --git a/images/base/images/packer-overview.png b/images/base/images/packer-overview.png deleted file mode 100644 index 66c9087c63b..00000000000 Binary files a/images/base/images/packer-overview.png and /dev/null differ diff --git a/images/base/packer.json b/images/base/packer.json deleted file mode 100644 index 03f9f3a8dce..00000000000 --- a/images/base/packer.json +++ /dev/null @@ -1,105 +0,0 @@ -{ - "push": { - "name": "", - "vcs": true - }, - "variables": { - "artifact": "baseimage", - "release": "{{env `BASEIMAGE_RELEASE`}}", - "dockerhub_email": "{{env `DOCKERHUB_EMAIL`}}", - "dockerhub_username": "{{env `DOCKERHUB_USERNAME`}}", - "dockerhub_password": "{{env `DOCKERHUB_PASSWORD`}}" - }, - "provisioners": [ - { - "type": "shell", - "environment_vars": ["BASEIMAGE_RELEASE={{user `release`}}"], - "override": { - "virtualbox-iso": { - "scripts": [ - "scripts/common/init.sh", - "scripts/vagrant/init.sh", - "scripts/vagrant/virtualbox.sh", - "scripts/vagrant/vagrant.sh", - "scripts/common/setup.sh", - "scripts/vagrant/cleanup.sh", - "scripts/vagrant/zerodisk.sh" - ], - "execute_command": "echo 'vagrant'|sudo -S {{.Vars}} bash '{{.Path}}'" - } - } - } - ], - "builders": [ - { - "type": "virtualbox-iso", - "boot_command": [ - "", - "", - "", - "/install/vmlinuz", - " auto", - " console-setup/ask_detect=false", - " console-setup/layoutcode=us", - " console-setup/modelcode=pc105", - " debian-installer=en_US", - " fb=false", - " initrd=/install/initrd.gz", - " kbd-chooser/method=us", - " keyboard-configuration/layout=USA", - " keyboard-configuration/variant=USA", - " locale=en_US", - " netcfg/get_hostname=ubuntu-1404", - " netcfg/get_domain=vagrantup.com", - " noapic", - " preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg", - " -- ", - "" - ], - "headless": true, - "boot_wait": "10s", - "disk_size": 40960, - "guest_os_type": "Ubuntu_64", - "http_directory": "http", - "iso_checksum": "ca2531b8cd79ea5b778ede3a524779b9", - "iso_checksum_type": "md5", - "iso_url": "http://old-releases.ubuntu.com/releases/14.04.1/ubuntu-14.04.1-server-amd64.iso", - "ssh_username": "vagrant", - "ssh_password": "vagrant", - "ssh_port": 22, - "ssh_wait_timeout": "10000s", - "shutdown_command": "echo '/sbin/halt -h -p' > shutdown.sh; echo 'vagrant'|sudo -S bash 'shutdown.sh'", - "virtualbox_version_file": ".vbox_version", - "guest_additions_mode": "disable", - "hard_drive_interface": "sata", - "vboxmanage": [ - ["modifyvm", "{{.Name}}", "--vrde", "off"], - ["modifyvm", "{{.Name}}", "--pae", "off"], - ["modifyvm", "{{.Name}}", "--paravirtprovider", "legacy"] - ] - } - ], - "post-processors": [ - [ - { 
- "type": "vagrant", - "only": [ - "virtualbox-iso" - ], - "keep_input_artifact": false - }, - { - "type": "atlas", - "only": [ - "virtualbox-iso" - ], - "artifact": "hyperledger/{{user `artifact`}}", - "artifact_type": "vagrant.box", - "metadata": { - "provider": "virtualbox", - "version": "{{user `release`}}" - } - } - ] - ] -} diff --git a/images/base/release b/images/base/release deleted file mode 100644 index 7c1886bb9f4..00000000000 --- a/images/base/release +++ /dev/null @@ -1 +0,0 @@ -0.0.10 diff --git a/images/base/scripts/common/golang_crossCompileSetup.sh b/images/base/scripts/common/golang_crossCompileSetup.sh deleted file mode 100755 index 0d7468203d7..00000000000 --- a/images/base/scripts/common/golang_crossCompileSetup.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -helpme() -{ - cat </etc/profile.d/goroot.sh -export GOROOT=$GOROOT -export GOPATH=$GOPATH -export PATH=\$PATH:$GOROOT/bin:$GOPATH/bin -EOF - - -# Install NodeJS - -if [ x$MACHINE = xs390x ] -then - apt-get install --yes nodejs -else - NODE_VER=0.12.7 - NODE_PACKAGE=node-v$NODE_VER-linux-x64.tar.gz - TEMP_DIR=/tmp - SRC_PATH=$TEMP_DIR/$NODE_PACKAGE - - # First remove any prior packages downloaded in case of failure - cd $TEMP_DIR - rm -f node*.tar.gz - wget --quiet https://nodejs.org/dist/v$NODE_VER/$NODE_PACKAGE - cd /usr/local && sudo tar --strip-components 1 -xzf $SRC_PATH -fi - -# Install GRPC - -# ---------------------------------------------------------------- -# NOTE: For instructions, see https://github.com/google/protobuf -# -# ---------------------------------------------------------------- - -# First install protoc -cd /tmp -git clone https://github.com/google/protobuf.git -cd protobuf -git checkout v3.0.0-beta-3 -#unzip needed for ./autogen.sh -apt-get install -y unzip -apt-get install -y autoconf -apt-get install -y build-essential libtool -./autogen.sh -# NOTE: By default, the package will be installed to /usr/local. However, on many platforms, /usr/local/lib is not part of LD_LIBRARY_PATH. -# You can add it, but it may be easier to just install to /usr instead. 
-# -# To do this, invoke configure as follows: -# -# ./configure --prefix=/usr -# -#./configure -./configure --prefix=/usr - -if [ x$MACHINE = xs390x ] -then - echo FIXME: protobufs wont compile on 390, missing atomic call -else - make - make check - make install -fi -export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH -cd ~/ - -# Install rocksdb -apt-get install -y libsnappy-dev zlib1g-dev libbz2-dev -cd /tmp -git clone https://github.com/facebook/rocksdb.git -cd rocksdb -git checkout tags/v4.1 -if [ x$MACHINE = xs390x ] -then - echo There were some bugs in 4.1 for x/p, dev stream has the fix, living dangereously, fixing in place - sed -i -e "s/-march=native/-march=z196/" build_tools/build_detect_platform - sed -i -e "s/-momit-leaf-frame-pointer/-DDUMBDUMMY/" Makefile -fi - -PORTABLE=1 make shared_lib -INSTALL_PATH=/usr/local make install-shared -ldconfig -cd ~/ - -# Make our versioning persistent -echo $BASEIMAGE_RELEASE > /etc/hyperledger-baseimage-release - -# clean up our environment -apt-get -y autoremove -apt-get clean -rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* diff --git a/images/base/scripts/docker/init.sh b/images/base/scripts/docker/init.sh deleted file mode 100755 index 4e1e5c95c50..00000000000 --- a/images/base/scripts/docker/init.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -apt-get update -apt-get install -y wget diff --git a/images/base/scripts/vagrant/cleanup.sh b/images/base/scripts/vagrant/cleanup.sh deleted file mode 100644 index ddbc4b0b493..00000000000 --- a/images/base/scripts/vagrant/cleanup.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -# Removing leftover leases and persistent rules -echo "cleaning up dhcp leases" -rm /var/lib/dhcp/* - -# Make sure Udev doesn't block our network -echo "cleaning up udev rules" -rm /etc/udev/rules.d/70-persistent-net.rules -mkdir /etc/udev/rules.d/70-persistent-net.rules -rm -rf /dev/.udev/ -rm /lib/udev/rules.d/75-persistent-net-generator.rules - -echo "Adding a 2 sec delay to the interface up, to make the dhclient happy" -echo "pre-up sleep 2" >> /etc/network/interfaces diff --git a/images/base/scripts/vagrant/init.sh b/images/base/scripts/vagrant/init.sh deleted file mode 100755 index 3c10c5a5b39..00000000000 --- a/images/base/scripts/vagrant/init.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -e -set -x - -# Install headers so that we may build the vbox drivers layer -apt-get install -y build-essential zlib1g-dev libssl-dev libreadline-gplv2-dev unzip - -sed -i -e '/Defaults\s\+env_reset/a Defaults\texempt_group=sudo' /etc/sudoers -sed -i -e 's/%sudo ALL=(ALL:ALL) ALL/%sudo ALL=NOPASSWD:ALL/g' /etc/sudoers - -# Tweak sshd to prevent DNS resolution (speed up logins) -echo 'UseDNS no' >> /etc/ssh/sshd_config - -# Remove 5s grub timeout to speed up booting -cat < /etc/default/grub -# If you change this file, run 'update-grub' afterwards to update -# /boot/grub/grub.cfg. 
- -GRUB_DEFAULT=0 -GRUB_TIMEOUT=0 -GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian` -GRUB_CMDLINE_LINUX_DEFAULT="quiet" -GRUB_CMDLINE_LINUX="debian-installer=en_US" -EOF - -update-grub diff --git a/images/base/scripts/vagrant/vagrant.sh b/images/base/scripts/vagrant/vagrant.sh deleted file mode 100755 index d43ab6351c1..00000000000 --- a/images/base/scripts/vagrant/vagrant.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -set -e -set -x - -# Vagrant specific -date > /etc/vagrant_box_build_time - -# Installing vagrant keys -mkdir -pm 700 /home/vagrant/.ssh -wget --no-check-certificate 'https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub' -O /home/vagrant/.ssh/authorized_keys -chmod 0600 /home/vagrant/.ssh/authorized_keys -chown -R vagrant /home/vagrant/.ssh diff --git a/images/base/scripts/vagrant/virtualbox.sh b/images/base/scripts/vagrant/virtualbox.sh deleted file mode 100755 index a8b2aeb1531..00000000000 --- a/images/base/scripts/vagrant/virtualbox.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -x - -apt-get install --yes virtualbox-guest-utils diff --git a/images/base/scripts/vagrant/zerodisk.sh b/images/base/scripts/vagrant/zerodisk.sh deleted file mode 100644 index 35370d64a3d..00000000000 --- a/images/base/scripts/vagrant/zerodisk.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# Zero out the free space to save space in the final image: -dd if=/dev/zero of=/EMPTY bs=1M -rm -f /EMPTY - -# Sync to ensure that the delete completes before this moves on. -sync -sync -sync diff --git a/images/ccenv/Dockerfile.in b/images/ccenv/Dockerfile.in index dc79f631be1..27d764f7f35 100644 --- a/images/ccenv/Dockerfile.in +++ b/images/ccenv/Dockerfile.in @@ -1,2 +1,3 @@ -FROM hyperledger/fabric-src:latest +FROM hyperledger/fabric-baseimage:_BASE_TAG_ COPY bin/* /usr/local/bin/ +ADD goshim.tar.bz2 $GOPATH/src/ diff --git a/images/javaenv/Dockerfile.in b/images/javaenv/Dockerfile.in index 48ad2844568..678ca973a16 100644 --- a/images/javaenv/Dockerfile.in +++ b/images/javaenv/Dockerfile.in @@ -1,9 +1,9 @@ -FROM java:openjdk-8 +FROM openjdk:8 RUN wget https://services.gradle.org/distributions/gradle-2.12-bin.zip -P /tmp --quiet RUN unzip -q /tmp/gradle-2.12-bin.zip -d /opt && rm /tmp/gradle-2.12-bin.zip RUN ln -s /opt/gradle-2.12/bin/gradle /usr/bin ADD javashimsrc.tar.bz2 /root ADD protos.tar.bz2 /root WORKDIR /root -# Build java shim after copying proto files from fabric/proto +# Build java shim after copying proto files from fabric/proto RUN core/chaincode/shim/java/javabuild.sh diff --git a/images/src/Dockerfile.in b/images/src/Dockerfile.in index 5b94527b6e4..507220741e5 100644 --- a/images/src/Dockerfile.in +++ b/images/src/Dockerfile.in @@ -1,2 +1,2 @@ -FROM hyperledger/fabric-baseimage:latest +FROM hyperledger/fabric-baseimage:_BASE_TAG_ ADD gopath.tar.bz2 $GOPATH/src/github.com/hyperledger/fabric diff --git a/membersrvc/Dockerfile b/membersrvc/Dockerfile deleted file mode 100644 index f19aab67568..00000000000 --- a/membersrvc/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -from hyperledger/fabric-baseimage:latest -# Copy GOPATH src and install Peer -RUN mkdir -p /var/hyperledger/db -WORKDIR $GOPATH/src/github.com/hyperledger/fabric/ -COPY . . 
-WORKDIR membersrvc -RUN pwd -RUN CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" go install && cp $GOPATH/src/github.com/hyperledger/fabric/membersrvc/membersrvc.yaml $GOPATH/bin -# RUN CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" go install diff --git a/membersrvc/ca/aca.go b/membersrvc/ca/aca.go index c0d83e814fe..22e9428b139 100644 --- a/membersrvc/ca/aca.go +++ b/membersrvc/ca/aca.go @@ -19,7 +19,6 @@ package ca import ( "encoding/asn1" "errors" - "google/protobuf" "strings" "time" @@ -32,6 +31,7 @@ import ( "github.com/spf13/viper" "google.golang.org/grpc" + "github.com/golang/protobuf/ptypes/timestamp" pb "github.com/hyperledger/fabric/membersrvc/protos" ) @@ -205,16 +205,16 @@ func (attrPair *AttributePair) SetValidTo(date time.Time) { //ToACAAttribute converts the receiver to the protobuf format. func (attrPair *AttributePair) ToACAAttribute() *pb.ACAAttribute { - var from, to *google_protobuf.Timestamp + var from, to *timestamp.Timestamp if attrPair.validFrom.IsZero() { from = nil } else { - from = &google_protobuf.Timestamp{Seconds: attrPair.validFrom.Unix(), Nanos: int32(attrPair.validFrom.UnixNano())} + from = ×tamp.Timestamp{Seconds: attrPair.validFrom.Unix(), Nanos: int32(attrPair.validFrom.UnixNano())} } if attrPair.validTo.IsZero() { to = nil } else { - to = &google_protobuf.Timestamp{Seconds: attrPair.validTo.Unix(), Nanos: int32(attrPair.validTo.UnixNano())} + to = ×tamp.Timestamp{Seconds: attrPair.validTo.Unix(), Nanos: int32(attrPair.validTo.UnixNano())} } return &pb.ACAAttribute{AttributeName: attrPair.attributeName, AttributeValue: attrPair.attributeValue, ValidFrom: from, ValidTo: to} @@ -276,7 +276,10 @@ func (aca *ACA) fetchAttributes(id, affiliation string) ([]*AttributePair, error return attributes, nil } -func (aca *ACA) populateAttributes(attrs []*AttributePair) error { +func (aca *ACA) PopulateAttributes(attrs []*AttributePair) error { + + acaLogger.Debugf("PopulateAttributes: %+v", attrs) + mutex.Lock() defer mutex.Unlock() @@ -285,6 +288,7 @@ func (aca *ACA) populateAttributes(attrs []*AttributePair) error { return dberr } for _, attr := range attrs { + acaLogger.Debugf("attr: %+v", attr) if err := aca.populateAttribute(tx, attr); err != nil { dberr = tx.Rollback() if dberr != nil { @@ -331,7 +335,7 @@ func (aca *ACA) fetchAndPopulateAttributes(id, affiliation string) error { if err != nil { return err } - err = aca.populateAttributes(attrs) + err = aca.PopulateAttributes(attrs) if err != nil { return err } diff --git a/membersrvc/ca/aca_test.go b/membersrvc/ca/aca_test.go index f47f1f73bcc..476085a7e26 100644 --- a/membersrvc/ca/aca_test.go +++ b/membersrvc/ca/aca_test.go @@ -19,7 +19,6 @@ package ca import ( "bytes" "errors" - "google/protobuf" "io/ioutil" "math/big" "strings" @@ -29,6 +28,7 @@ import ( "crypto/x509" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/timestamp" "github.com/hyperledger/fabric/core/crypto/primitives" pb "github.com/hyperledger/fabric/membersrvc/protos" "golang.org/x/net/context" @@ -168,7 +168,7 @@ func fetchAttributes() (*pb.ACAFetchAttrResp, error) { defer sock.Close() req := &pb.ACAFetchAttrReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, ECert: &pb.Cert{Cert: cert.Raw}, Signature: nil} @@ -210,7 +210,7 @@ func TestFetchAttributes_MissingSignature(t *testing.T) { defer sock.Close() req := &pb.ACAFetchAttrReq{ - Ts: &google_protobuf.Timestamp{Seconds: 
time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, ECert: &pb.Cert{Cert: cert.Raw}, Signature: nil} @@ -245,7 +245,7 @@ func TestRequestAttributes(t *testing.T) { attributes = append(attributes, &pb.TCertAttribute{AttributeName: "identity-number"}) req := &pb.ACAAttrReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, Id: &pb.Identity{Id: identity}, ECert: &pb.Cert{Cert: ecert}, Attributes: attributes, @@ -320,7 +320,7 @@ func TestRequestAttributes_AttributesMismatch(t *testing.T) { attributes = append(attributes, &pb.TCertAttribute{AttributeName: "account"}) req := &pb.ACAAttrReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, Id: &pb.Identity{Id: identity}, ECert: &pb.Cert{Cert: ecert}, Attributes: attributes, @@ -380,7 +380,7 @@ func TestRequestAttributes_MissingSignature(t *testing.T) { attributes = append(attributes, &pb.TCertAttribute{AttributeName: "identity-number"}) req := &pb.ACAAttrReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, Id: &pb.Identity{Id: identity}, ECert: &pb.Cert{Cert: ecert}, Attributes: attributes, @@ -415,7 +415,7 @@ func TestRequestAttributes_DuplicatedAttributes(t *testing.T) { attributes = append(attributes, &pb.TCertAttribute{AttributeName: "company"}) req := &pb.ACAAttrReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, Id: &pb.Identity{Id: identity}, ECert: &pb.Cert{Cert: ecert}, Attributes: attributes, @@ -469,7 +469,7 @@ func TestRequestAttributes_FullAttributes(t *testing.T) { attributes = append(attributes, &pb.TCertAttribute{AttributeName: "business_unit"}) req := &pb.ACAAttrReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, Id: &pb.Identity{Id: identity}, ECert: &pb.Cert{Cert: ecert}, Attributes: attributes, @@ -549,7 +549,7 @@ func TestRequestAttributes_PartialAttributes(t *testing.T) { attributes = append(attributes, &pb.TCertAttribute{AttributeName: "credit_card"}) req := &pb.ACAAttrReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, Id: &pb.Identity{Id: identity}, ECert: &pb.Cert{Cert: ecert}, Attributes: attributes, diff --git a/membersrvc/ca/ca.go b/membersrvc/ca/ca.go index 05789be7dbb..3698d9b1f7c 100644 --- a/membersrvc/ca/ca.go +++ b/membersrvc/ca/ca.go @@ -35,6 +35,8 @@ import ( "sync" "time" + gp "google/protobuf" + "github.com/hyperledger/fabric/core/crypto/primitives" "github.com/hyperledger/fabric/flogging" pb "github.com/hyperledger/fabric/membersrvc/protos" @@ -578,11 +580,11 @@ func (ca *CA) validateAndGenerateEnrollID(id, affiliation string, role pb.Role) // registerUser registers a new member with the CA // -func (ca *CA) registerUser(id, affiliation string, role pb.Role, registrar, memberMetadata string, opt ...string) (string, error) { +func (ca *CA) registerUser(id, affiliation string, role pb.Role, attrs []*pb.Attribute, aca *ACA, registrar, memberMetadata string, opt ...string) (string, error) { memberMetadata = removeQuotes(memberMetadata) roleStr, _ := MemberRoleToString(role) - caLogger.Debugf("Received request to register user with id: %s, affiliation: %s, role: %s, 
registrar: %s, memberMetadata: %s\n", - id, affiliation, roleStr, registrar, memberMetadata) + caLogger.Debugf("Received request to register user with id: %s, affiliation: %s, role: %s, attrs: %+v, registrar: %s, memberMetadata: %s\n", + id, affiliation, roleStr, attrs, registrar, memberMetadata) var enrollID, tok string var err error @@ -606,11 +608,21 @@ func (ca *CA) registerUser(id, affiliation string, role pb.Role, registrar, memb if err != nil { return "", err } + tok, err = ca.registerUserWithEnrollID(id, enrollID, role, memberMetadata, opt...) if err != nil { return "", err } - return tok, nil + + if attrs != nil && aca != nil { + var pairs []*AttributePair + pairs, err = toAttributePairs(id, affiliation, attrs) + if err == nil { + err = aca.PopulateAttributes(pairs) + } + } + + return tok, err } // registerUserWithEnrollID registers a new user and its enrollmentID, role and state @@ -870,25 +882,36 @@ func (mm *MemberMetadata) canRegister(registrar string, newRole string, newMembe caLogger.Debugf("MM.canRegister: role %s can't be registered by %s\n", newRole, registrar) return errors.New("member " + registrar + " may not register member of type " + newRole) } + // The registrar privileges that are being registered must not be larger than the registrar's if newMemberMetadata == nil { // Not requesting registrar privileges for this member, so we are OK caLogger.Debug("MM.canRegister: not requesting registrar privileges") return nil } - return strsContained(newMemberMetadata.Registrar.Roles, mm.Registrar.DelegateRoles, registrar, "delegateRoles") + + // Make sure this registrar is not delegating an invalid role + err := checkDelegateRoles(newMemberMetadata.Registrar.Roles, mm.Registrar.DelegateRoles, registrar) + if err != nil { + caLogger.Debug("MM.canRegister: checkDelegateRoles failure") + return err + } + + // Can register OK + caLogger.Debug("MM.canRegister: OK") + return nil } // Return an error if all strings in 'strs1' are not contained in 'strs2' -func strsContained(strs1 []string, strs2 []string, registrar string, field string) error { - caLogger.Debugf("CA.strsContained: registrar=%s, field=%s, strs1=%+v, strs2=%+v\n", registrar, field, strs1, strs2) +func checkDelegateRoles(strs1 []string, strs2 []string, registrar string) error { + caLogger.Debugf("CA.checkDelegateRoles: registrar=%s, strs1=%+v, strs2=%+v\n", registrar, strs1, strs2) for _, s := range strs1 { if !strContained(s, strs2) { - caLogger.Debugf("CA.strsContained: no: %s not in %+v\n", s, strs2) - return errors.New("user " + registrar + " may not register " + field + " " + s) + caLogger.Debugf("CA.checkDelegateRoles: no: %s not in %+v\n", s, strs2) + return errors.New("user " + registrar + " may not register delegateRoles " + s) } } - caLogger.Debug("CA.strsContained: ok") + caLogger.Debug("CA.checkDelegateRoles: ok") return nil } @@ -902,6 +925,16 @@ func strContained(str string, strs []string) bool { return false } +// Return true if 'str' is prefixed by any string in 'strs'; otherwise return false +func isPrefixed(str string, strs []string) bool { + for _, s := range strs { + if strings.HasPrefix(str, s) { + return true + } + } + return false +} + // convert a role to a string func role2String(role int) string { if role == int(pb.Role_CLIENT) { @@ -928,3 +961,30 @@ func removeQuotes(str string) string { caLogger.Debugf("removeQuotes: %s\n", str) return str } + +// Convert the protobuf array of attributes to the AttributePair array format +// as required by the ACA code to populate the table +func 
toAttributePairs(id, affiliation string, attrs []*pb.Attribute) ([]*AttributePair, error) { + var pairs = make([]*AttributePair, 0) + for _, attr := range attrs { + vals := []string{id, affiliation, attr.Name, attr.Value, attr.NotBefore, attr.NotAfter} + pair, err := NewAttributePair(vals, nil) + if err != nil { + return nil, err + } + pairs = append(pairs, pair) + } + caLogger.Debugf("toAttributePairs: id=%s, affiliation=%s, attrs=%v, pairs=%v\n", + id, affiliation, attrs, pairs) + return pairs, nil +} + +func convertTime(ts *gp.Timestamp) time.Time { + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t +} diff --git a/membersrvc/ca/ca_test.yaml b/membersrvc/ca/ca_test.yaml index 2dcc8bb2637..3b3e68a7587 100644 --- a/membersrvc/ca/ca_test.yaml +++ b/membersrvc/ca/ca_test.yaml @@ -240,9 +240,9 @@ chaincode: # This is the basis for the Golang Dockerfile. Additional commands will be appended depedendent upon the chaincode specification. Dockerfile: | - from hyperledger/fabric-baseimage - COPY src $GOPATH/src - WORKDIR $GOPATH + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + COPY src $GOPATH/src + WORKDIR $GOPATH #timeout in millisecs for starting up a container and waiting for Register to come through. 1sec should be plenty for chaincode unit tests startuptimeout: 1000 diff --git a/membersrvc/ca/eca.go b/membersrvc/ca/eca.go index 9fcb5e129ec..43ec9522621 100644 --- a/membersrvc/ca/eca.go +++ b/membersrvc/ca/eca.go @@ -48,6 +48,7 @@ var ( // type ECA struct { *CA + aca *ACA obcKey []byte obcPriv, obcPub []byte gRPCServer *grpc.Server @@ -59,8 +60,8 @@ func initializeECATables(db *sql.DB) error { // NewECA sets up a new ECA. // -func NewECA() *ECA { - eca := &ECA{CA: NewCA("eca", initializeECATables)} +func NewECA(aca *ACA) *ECA { + eca := &ECA{CA: NewCA("eca", initializeECATables), aca: aca} flogging.LoggingInit("eca") { @@ -152,7 +153,7 @@ func (eca *ECA) populateUsersTable() { } } } - eca.registerUser(id, affiliation, pb.Role(role), registrar, memberMetadata, vals[1]) + eca.registerUser(id, affiliation, pb.Role(role), nil, eca.aca, registrar, memberMetadata, vals[1]) } } diff --git a/membersrvc/ca/eca_test.go b/membersrvc/ca/eca_test.go index bfbcf5220c1..a0f81c6be3a 100644 --- a/membersrvc/ca/eca_test.go +++ b/membersrvc/ca/eca_test.go @@ -21,12 +21,12 @@ import ( "crypto/rand" "crypto/x509" "errors" - "google/protobuf" "os" "testing" "time" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/timestamp" "github.com/hyperledger/fabric/core/crypto/primitives" "github.com/hyperledger/fabric/core/crypto/primitives/ecies" pb "github.com/hyperledger/fabric/membersrvc/protos" @@ -85,7 +85,7 @@ func enrollUser(user *User) error { } req := &pb.ECertCreateReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, Id: &pb.Identity{Id: user.enrollID}, Tok: &pb.Token{Tok: user.enrollPwd}, Sign: &pb.PublicKey{Type: pb.CryptoType_ECDSA, Key: signPub}, @@ -509,7 +509,7 @@ func TestCreateCertificatePairBadIdentity(t *testing.T) { ecap := &ECAP{eca} req := &pb.ECertCreateReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, Id: &pb.Identity{Id: "badIdentity"}, Tok: &pb.Token{Tok: testUser.enrollPwd}, Sign: &pb.PublicKey{Type: pb.CryptoType_ECDSA, Key: []byte{0}}, @@ -528,7 +528,7 @@ func TestCreateCertificatePairBadToken(t 
*testing.T) { ecap := &ECAP{eca} req := &pb.ECertCreateReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, Id: &pb.Identity{Id: testUser.enrollID}, Tok: &pb.Token{Tok: []byte("badPassword")}, Sign: &pb.PublicKey{Type: pb.CryptoType_ECDSA, Key: []byte{0}}, diff --git a/membersrvc/ca/ecaa.go b/membersrvc/ca/ecaa.go index 40af1c1fe06..15cd3e6f0ed 100644 --- a/membersrvc/ca/ecaa.go +++ b/membersrvc/ca/ecaa.go @@ -60,7 +60,7 @@ func (ecaa *ECAA) RegisterUser(ctx context.Context, in *pb.RegisterUserReq) (*pb } jsonStr := string(json) ecaaLogger.Debugf("gRPC ECAA:RegisterUser: json=%s", jsonStr) - tok, err := ecaa.eca.registerUser(in.Id.Id, in.Affiliation, in.Role, registrarID, jsonStr) + tok, err := ecaa.eca.registerUser(in.Id.Id, in.Affiliation, in.Role, in.Attributes, ecaa.eca.aca, registrarID, jsonStr) // Return the one-time password return &pb.Token{Tok: []byte(tok)}, err @@ -105,7 +105,7 @@ func (ecaa *ECAA) checkRegistrarSignature(in *pb.RegisterUserReq) error { // Check the signature if ecdsa.Verify(cert.PublicKey.(*ecdsa.PublicKey), hash.Sum(nil), r, s) == false { // Signature verification failure - ecaaLogger.Debugf("ECAA.checkRegistrarSignature: failure for %s", registrar) + ecaaLogger.Debugf("ECAA.checkRegistrarSignature: failure for %s (len=%d): %+v", registrar, len(raw), in) return errors.New("Signature verification failed.") } diff --git a/membersrvc/ca/ecap.go b/membersrvc/ca/ecap.go index 988bf0ad41b..050d10b3641 100644 --- a/membersrvc/ca/ecap.go +++ b/membersrvc/ca/ecap.go @@ -23,7 +23,6 @@ import ( "crypto/x509" "crypto/x509/pkix" "errors" - "google/protobuf" "io/ioutil" "math/big" "strconv" @@ -32,6 +31,7 @@ import ( "github.com/hyperledger/fabric/core/crypto/primitives/ecies" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/timestamp" "github.com/hyperledger/fabric/core/crypto/primitives" pb "github.com/hyperledger/fabric/membersrvc/protos" "github.com/op/go-logging" @@ -64,7 +64,7 @@ func (ecap *ECAP) fetchAttributes(cert *pb.Cert) error { defer sock.Close() req := &pb.ACAFetchAttrReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, ECert: cert, Signature: nil} diff --git a/membersrvc/ca/membersrvc_test.go b/membersrvc/ca/membersrvc_test.go index 6a0ec0a756f..a294f252663 100644 --- a/membersrvc/ca/membersrvc_test.go +++ b/membersrvc/ca/membersrvc_test.go @@ -70,7 +70,7 @@ func setupTestConfig() { func initPKI() { CacheConfiguration() // Cache configuration aca = NewACA() - eca = NewECA() + eca = NewECA(aca) tca = NewTCA(eca) } diff --git a/membersrvc/ca/tca_test.go b/membersrvc/ca/tca_test.go index a0b38e1941d..dbbf75c9861 100644 --- a/membersrvc/ca/tca_test.go +++ b/membersrvc/ca/tca_test.go @@ -21,7 +21,6 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "google/protobuf" "io/ioutil" "testing" "time" @@ -29,6 +28,7 @@ import ( "golang.org/x/net/context" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/timestamp" "github.com/hyperledger/fabric/core/crypto" "github.com/hyperledger/fabric/core/crypto/primitives" "github.com/hyperledger/fabric/membersrvc/protos" @@ -155,16 +155,17 @@ func initTCA() (*TCA, error) { } CacheConfiguration() // Cache configuration - eca := NewECA() - if eca == nil { - return nil, fmt.Errorf("Could not create a new ECA") - } aca := NewACA() if aca == nil { return nil, fmt.Errorf("Could not create a new ACA") } + eca := NewECA(aca) + if 
eca == nil { + return nil, fmt.Errorf("Could not create a new ECA") + } + tca := NewTCA(eca) if tca == nil { return nil, fmt.Errorf("Could not create a new TCA") @@ -175,7 +176,7 @@ func initTCA() (*TCA, error) { func buildCertificateSetRequest(enrollID string, enrollmentPrivKey *ecdsa.PrivateKey, num, numattrs int) (*protos.TCertCreateSetReq, error) { now := time.Now() - timestamp := google_protobuf.Timestamp{Seconds: int64(now.Second()), Nanos: int32(now.Nanosecond())} + timestamp := timestamp.Timestamp{Seconds: int64(now.Second()), Nanos: int32(now.Nanosecond())} var attributes []*protos.TCertAttribute if numattrs >= 0 { // else negative means use nil from above diff --git a/membersrvc/ca/tcap.go b/membersrvc/ca/tcap.go index 840d2b7cecb..da503111734 100644 --- a/membersrvc/ca/tcap.go +++ b/membersrvc/ca/tcap.go @@ -39,7 +39,7 @@ import ( "github.com/spf13/viper" "golang.org/x/net/context" - "google/protobuf" + "github.com/golang/protobuf/ptypes/timestamp" ) var tcapLogger = logging.MustGetLogger("tcap") @@ -69,7 +69,7 @@ func (tcap *TCAP) selectValidAttributes(certRaw []byte) ([]*pb.ACAAttribute, err } currentTime := time.Now() for _, extension := range cert.Extensions { - acaAtt := &pb.ACAAttribute{AttributeName: "", AttributeValue: nil, ValidFrom: &google_protobuf.Timestamp{Seconds: 0, Nanos: 0}, ValidTo: &google_protobuf.Timestamp{Seconds: 0, Nanos: 0}} + acaAtt := &pb.ACAAttribute{AttributeName: "", AttributeValue: nil, ValidFrom: ×tamp.Timestamp{Seconds: 0, Nanos: 0}, ValidTo: ×tamp.Timestamp{Seconds: 0, Nanos: 0}} if IsAttributeOID(extension.Id) { if err := proto.Unmarshal(extension.Value, acaAtt); err != nil { @@ -111,7 +111,7 @@ func (tcap *TCAP) requestAttributes(id string, ecert []byte, attrs []*pb.TCertAt } req := &pb.ACAAttrReq{ - Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, + Ts: ×tamp.Timestamp{Seconds: time.Now().Unix(), Nanos: 0}, Id: &pb.Identity{Id: id}, ECert: &pb.Cert{Cert: ecert}, Attributes: attrNames, diff --git a/membersrvc/ca/tlsca_test.go b/membersrvc/ca/tlsca_test.go index de890a9b704..6fa0d80b5c9 100644 --- a/membersrvc/ca/tlsca_test.go +++ b/membersrvc/ca/tlsca_test.go @@ -31,7 +31,6 @@ import ( "crypto/ecdsa" "crypto/rand" "crypto/x509" - "google/protobuf" "path/filepath" "github.com/golang/protobuf/proto" @@ -40,6 +39,8 @@ import ( membersrvc "github.com/hyperledger/fabric/membersrvc/protos" _ "fmt" + + "github.com/golang/protobuf/ptypes/timestamp" ) var ( @@ -63,7 +64,7 @@ func TestTLS(t *testing.T) { func startTLSCA(t *testing.T) { CacheConfiguration() // Cache configuration - ecaS = NewECA() + ecaS = NewECA(nil) tlscaS = NewTLSCA(ecaS) var opts []grpc.ServerOption @@ -122,7 +123,7 @@ func requestTLSCertificate(t *testing.T) { pubraw, _ := x509.MarshalPKIXPublicKey(&priv.PublicKey) now := time.Now() - timestamp := google_protobuf.Timestamp{Seconds: int64(now.Second()), Nanos: int32(now.Nanosecond())} + timestamp := timestamp.Timestamp{Seconds: int64(now.Second()), Nanos: int32(now.Nanosecond())} req := &membersrvc.TLSCertCreateReq{ Ts: ×tamp, diff --git a/membersrvc/membersrvc.yaml b/membersrvc/membersrvc.yaml index f4567bc9c5a..a3f8f39563a 100644 --- a/membersrvc/membersrvc.yaml +++ b/membersrvc/membersrvc.yaml @@ -1,8 +1,6 @@ # CA server parameters # server: - # current version of the CA - version: "0.1" # limits the number of operating system threads used by the CA # set to negative to use the system default setting diff --git a/membersrvc/protos/ca.pb.go b/membersrvc/protos/ca.pb.go index a5d9b6d902a..07604c3225f 100644 --- 
a/membersrvc/protos/ca.pb.go +++ b/membersrvc/protos/ca.pb.go @@ -19,6 +19,7 @@ It has these top-level messages: Signature Registrar RegisterUserReq + Attribute ReadUserSetReq User UserSet @@ -57,7 +58,7 @@ package protos import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "google/protobuf" +import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" import ( context "golang.org/x/net/context" @@ -69,6 +70,12 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + // Public/private keys. type CryptoType int32 @@ -92,6 +99,7 @@ var CryptoType_value = map[string]int32{ func (x CryptoType) String() string { return proto.EnumName(CryptoType_name, int32(x)) } +func (CryptoType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // User registration. // @@ -126,6 +134,7 @@ var Role_value = map[string]int32{ func (x Role) String() string { return proto.EnumName(Role_name, int32(x)) } +func (Role) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } type CAStatus_StatusCode int32 @@ -146,6 +155,7 @@ var CAStatus_StatusCode_value = map[string]int32{ func (x CAStatus_StatusCode) String() string { return proto.EnumName(CAStatus_StatusCode_name, int32(x)) } +func (CAStatus_StatusCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } type ACAAttrResp_StatusCode int32 @@ -200,6 +210,7 @@ var ACAAttrResp_StatusCode_value = map[string]int32{ func (x ACAAttrResp_StatusCode) String() string { return proto.EnumName(ACAAttrResp_StatusCode_name, int32(x)) } +func (ACAAttrResp_StatusCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 0} } type ACAFetchAttrResp_StatusCode int32 @@ -222,6 +233,9 @@ var ACAFetchAttrResp_StatusCode_value = map[string]int32{ func (x ACAFetchAttrResp_StatusCode) String() string { return proto.EnumName(ACAFetchAttrResp_StatusCode_name, int32(x)) } +func (ACAFetchAttrResp_StatusCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{40, 0} +} type FetchAttrsResult_StatusCode int32 @@ -244,6 +258,9 @@ var FetchAttrsResult_StatusCode_value = map[string]int32{ func (x FetchAttrsResult_StatusCode) String() string { return proto.EnumName(FetchAttrsResult_StatusCode_name, int32(x)) } +func (FetchAttrsResult_StatusCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{41, 0} +} // Status codes shared by both CAs. // @@ -251,60 +268,67 @@ type CAStatus struct { Status CAStatus_StatusCode `protobuf:"varint,1,opt,name=status,enum=protos.CAStatus_StatusCode" json:"status,omitempty"` } -func (m *CAStatus) Reset() { *m = CAStatus{} } -func (m *CAStatus) String() string { return proto.CompactTextString(m) } -func (*CAStatus) ProtoMessage() {} +func (m *CAStatus) Reset() { *m = CAStatus{} } +func (m *CAStatus) String() string { return proto.CompactTextString(m) } +func (*CAStatus) ProtoMessage() {} +func (*CAStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // Empty message. 
type Empty struct { } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } // Uniquely identifies a user towards either CA. type Identity struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` } -func (m *Identity) Reset() { *m = Identity{} } -func (m *Identity) String() string { return proto.CompactTextString(m) } -func (*Identity) ProtoMessage() {} +func (m *Identity) Reset() { *m = Identity{} } +func (m *Identity) String() string { return proto.CompactTextString(m) } +func (*Identity) ProtoMessage() {} +func (*Identity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } type Token struct { Tok []byte `protobuf:"bytes,1,opt,name=tok,proto3" json:"tok,omitempty"` } -func (m *Token) Reset() { *m = Token{} } -func (m *Token) String() string { return proto.CompactTextString(m) } -func (*Token) ProtoMessage() {} +func (m *Token) Reset() { *m = Token{} } +func (m *Token) String() string { return proto.CompactTextString(m) } +func (*Token) ProtoMessage() {} +func (*Token) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } type Hash struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` } -func (m *Hash) Reset() { *m = Hash{} } -func (m *Hash) String() string { return proto.CompactTextString(m) } -func (*Hash) ProtoMessage() {} +func (m *Hash) Reset() { *m = Hash{} } +func (m *Hash) String() string { return proto.CompactTextString(m) } +func (*Hash) ProtoMessage() {} +func (*Hash) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } type PublicKey struct { Type CryptoType `protobuf:"varint,1,opt,name=type,enum=protos.CryptoType" json:"type,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` } -func (m *PublicKey) Reset() { *m = PublicKey{} } -func (m *PublicKey) String() string { return proto.CompactTextString(m) } -func (*PublicKey) ProtoMessage() {} +func (m *PublicKey) Reset() { *m = PublicKey{} } +func (m *PublicKey) String() string { return proto.CompactTextString(m) } +func (*PublicKey) ProtoMessage() {} +func (*PublicKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } type PrivateKey struct { Type CryptoType `protobuf:"varint,1,opt,name=type,enum=protos.CryptoType" json:"type,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` } -func (m *PrivateKey) Reset() { *m = PrivateKey{} } -func (m *PrivateKey) String() string { return proto.CompactTextString(m) } -func (*PrivateKey) ProtoMessage() {} +func (m *PrivateKey) Reset() { *m = PrivateKey{} } +func (m *PrivateKey) String() string { return proto.CompactTextString(m) } +func (*PrivateKey) ProtoMessage() {} +func (*PrivateKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } // Signature. 
// @@ -314,9 +338,10 @@ type Signature struct { S []byte `protobuf:"bytes,3,opt,name=s,proto3" json:"s,omitempty"` } -func (m *Signature) Reset() { *m = Signature{} } -func (m *Signature) String() string { return proto.CompactTextString(m) } -func (*Signature) ProtoMessage() {} +func (m *Signature) Reset() { *m = Signature{} } +func (m *Signature) String() string { return proto.CompactTextString(m) } +func (*Signature) ProtoMessage() {} +func (*Signature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } type Registrar struct { Id *Identity `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` @@ -324,9 +349,10 @@ type Registrar struct { DelegateRoles []string `protobuf:"bytes,3,rep,name=delegateRoles" json:"delegateRoles,omitempty"` } -func (m *Registrar) Reset() { *m = Registrar{} } -func (m *Registrar) String() string { return proto.CompactTextString(m) } -func (*Registrar) ProtoMessage() {} +func (m *Registrar) Reset() { *m = Registrar{} } +func (m *Registrar) String() string { return proto.CompactTextString(m) } +func (*Registrar) ProtoMessage() {} +func (*Registrar) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *Registrar) GetId() *Identity { if m != nil { @@ -336,16 +362,18 @@ func (m *Registrar) GetId() *Identity { } type RegisterUserReq struct { - Id *Identity `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Role Role `protobuf:"varint,2,opt,name=role,enum=protos.Role" json:"role,omitempty"` - Affiliation string `protobuf:"bytes,4,opt,name=affiliation" json:"affiliation,omitempty"` - Registrar *Registrar `protobuf:"bytes,5,opt,name=registrar" json:"registrar,omitempty"` - Sig *Signature `protobuf:"bytes,6,opt,name=sig" json:"sig,omitempty"` + Id *Identity `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Role Role `protobuf:"varint,2,opt,name=role,enum=protos.Role" json:"role,omitempty"` + Attributes []*Attribute `protobuf:"bytes,3,rep,name=attributes" json:"attributes,omitempty"` + Affiliation string `protobuf:"bytes,4,opt,name=affiliation" json:"affiliation,omitempty"` + Registrar *Registrar `protobuf:"bytes,5,opt,name=registrar" json:"registrar,omitempty"` + Sig *Signature `protobuf:"bytes,6,opt,name=sig" json:"sig,omitempty"` } -func (m *RegisterUserReq) Reset() { *m = RegisterUserReq{} } -func (m *RegisterUserReq) String() string { return proto.CompactTextString(m) } -func (*RegisterUserReq) ProtoMessage() {} +func (m *RegisterUserReq) Reset() { *m = RegisterUserReq{} } +func (m *RegisterUserReq) String() string { return proto.CompactTextString(m) } +func (*RegisterUserReq) ProtoMessage() {} +func (*RegisterUserReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *RegisterUserReq) GetId() *Identity { if m != nil { @@ -354,6 +382,13 @@ func (m *RegisterUserReq) GetId() *Identity { return nil } +func (m *RegisterUserReq) GetAttributes() []*Attribute { + if m != nil { + return m.Attributes + } + return nil +} + func (m *RegisterUserReq) GetRegistrar() *Registrar { if m != nil { return m.Registrar @@ -368,15 +403,28 @@ func (m *RegisterUserReq) GetSig() *Signature { return nil } +type Attribute struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + NotBefore string `protobuf:"bytes,3,opt,name=notBefore" json:"notBefore,omitempty"` + NotAfter string `protobuf:"bytes,4,opt,name=notAfter" json:"notAfter,omitempty"` +} + +func (m *Attribute) Reset() { *m = Attribute{} } +func (m *Attribute) String() string 
{ return proto.CompactTextString(m) } +func (*Attribute) ProtoMessage() {} +func (*Attribute) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + type ReadUserSetReq struct { Req *Identity `protobuf:"bytes,1,opt,name=req" json:"req,omitempty"` Role Role `protobuf:"varint,2,opt,name=role,enum=protos.Role" json:"role,omitempty"` Sig *Signature `protobuf:"bytes,3,opt,name=sig" json:"sig,omitempty"` } -func (m *ReadUserSetReq) Reset() { *m = ReadUserSetReq{} } -func (m *ReadUserSetReq) String() string { return proto.CompactTextString(m) } -func (*ReadUserSetReq) ProtoMessage() {} +func (m *ReadUserSetReq) Reset() { *m = ReadUserSetReq{} } +func (m *ReadUserSetReq) String() string { return proto.CompactTextString(m) } +func (*ReadUserSetReq) ProtoMessage() {} +func (*ReadUserSetReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } func (m *ReadUserSetReq) GetReq() *Identity { if m != nil { @@ -397,9 +445,10 @@ type User struct { Role Role `protobuf:"varint,2,opt,name=role,enum=protos.Role" json:"role,omitempty"` } -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *User) GetId() *Identity { if m != nil { @@ -412,9 +461,10 @@ type UserSet struct { Users []*User `protobuf:"bytes,1,rep,name=users" json:"users,omitempty"` } -func (m *UserSet) Reset() { *m = UserSet{} } -func (m *UserSet) String() string { return proto.CompactTextString(m) } -func (*UserSet) ProtoMessage() {} +func (m *UserSet) Reset() { *m = UserSet{} } +func (m *UserSet) String() string { return proto.CompactTextString(m) } +func (*UserSet) ProtoMessage() {} +func (*UserSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } func (m *UserSet) GetUsers() []*User { if m != nil { @@ -434,9 +484,10 @@ type ECertCreateReq struct { Sig *Signature `protobuf:"bytes,6,opt,name=sig" json:"sig,omitempty"` } -func (m *ECertCreateReq) Reset() { *m = ECertCreateReq{} } -func (m *ECertCreateReq) String() string { return proto.CompactTextString(m) } -func (*ECertCreateReq) ProtoMessage() {} +func (m *ECertCreateReq) Reset() { *m = ECertCreateReq{} } +func (m *ECertCreateReq) String() string { return proto.CompactTextString(m) } +func (*ECertCreateReq) ProtoMessage() {} +func (*ECertCreateReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *ECertCreateReq) GetTs() *google_protobuf.Timestamp { if m != nil { @@ -488,9 +539,10 @@ type ECertCreateResp struct { FetchResult *FetchAttrsResult `protobuf:"bytes,4,opt,name=fetchResult" json:"fetchResult,omitempty"` } -func (m *ECertCreateResp) Reset() { *m = ECertCreateResp{} } -func (m *ECertCreateResp) String() string { return proto.CompactTextString(m) } -func (*ECertCreateResp) ProtoMessage() {} +func (m *ECertCreateResp) Reset() { *m = ECertCreateResp{} } +func (m *ECertCreateResp) String() string { return proto.CompactTextString(m) } +func (*ECertCreateResp) ProtoMessage() {} +func (*ECertCreateResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } func (m *ECertCreateResp) GetCerts() *CertPair { if m != nil { @@ -524,9 +576,10 @@ type ECertReadReq struct { Id *Identity `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` } -func (m *ECertReadReq) Reset() { *m = ECertReadReq{} } -func (m 
*ECertReadReq) String() string { return proto.CompactTextString(m) } -func (*ECertReadReq) ProtoMessage() {} +func (m *ECertReadReq) Reset() { *m = ECertReadReq{} } +func (m *ECertReadReq) String() string { return proto.CompactTextString(m) } +func (*ECertReadReq) ProtoMessage() {} +func (*ECertReadReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } func (m *ECertReadReq) GetId() *Identity { if m != nil { @@ -541,9 +594,10 @@ type ECertRevokeReq struct { Sig *Signature `protobuf:"bytes,3,opt,name=sig" json:"sig,omitempty"` } -func (m *ECertRevokeReq) Reset() { *m = ECertRevokeReq{} } -func (m *ECertRevokeReq) String() string { return proto.CompactTextString(m) } -func (*ECertRevokeReq) ProtoMessage() {} +func (m *ECertRevokeReq) Reset() { *m = ECertRevokeReq{} } +func (m *ECertRevokeReq) String() string { return proto.CompactTextString(m) } +func (*ECertRevokeReq) ProtoMessage() {} +func (*ECertRevokeReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *ECertRevokeReq) GetId() *Identity { if m != nil { @@ -571,9 +625,10 @@ type ECertCRLReq struct { Sig *Signature `protobuf:"bytes,2,opt,name=sig" json:"sig,omitempty"` } -func (m *ECertCRLReq) Reset() { *m = ECertCRLReq{} } -func (m *ECertCRLReq) String() string { return proto.CompactTextString(m) } -func (*ECertCRLReq) ProtoMessage() {} +func (m *ECertCRLReq) Reset() { *m = ECertCRLReq{} } +func (m *ECertCRLReq) String() string { return proto.CompactTextString(m) } +func (*ECertCRLReq) ProtoMessage() {} +func (*ECertCRLReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *ECertCRLReq) GetId() *Identity { if m != nil { @@ -596,9 +651,10 @@ type TCertCreateReq struct { Sig *Signature `protobuf:"bytes,4,opt,name=sig" json:"sig,omitempty"` } -func (m *TCertCreateReq) Reset() { *m = TCertCreateReq{} } -func (m *TCertCreateReq) String() string { return proto.CompactTextString(m) } -func (*TCertCreateReq) ProtoMessage() {} +func (m *TCertCreateReq) Reset() { *m = TCertCreateReq{} } +func (m *TCertCreateReq) String() string { return proto.CompactTextString(m) } +func (*TCertCreateReq) ProtoMessage() {} +func (*TCertCreateReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } func (m *TCertCreateReq) GetTs() *google_protobuf.Timestamp { if m != nil { @@ -632,9 +688,10 @@ type TCertCreateResp struct { Cert *Cert `protobuf:"bytes,1,opt,name=cert" json:"cert,omitempty"` } -func (m *TCertCreateResp) Reset() { *m = TCertCreateResp{} } -func (m *TCertCreateResp) String() string { return proto.CompactTextString(m) } -func (*TCertCreateResp) ProtoMessage() {} +func (m *TCertCreateResp) Reset() { *m = TCertCreateResp{} } +func (m *TCertCreateResp) String() string { return proto.CompactTextString(m) } +func (*TCertCreateResp) ProtoMessage() {} +func (*TCertCreateResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *TCertCreateResp) GetCert() *Cert { if m != nil { @@ -651,9 +708,10 @@ type TCertCreateSetReq struct { Sig *Signature `protobuf:"bytes,5,opt,name=sig" json:"sig,omitempty"` } -func (m *TCertCreateSetReq) Reset() { *m = TCertCreateSetReq{} } -func (m *TCertCreateSetReq) String() string { return proto.CompactTextString(m) } -func (*TCertCreateSetReq) ProtoMessage() {} +func (m *TCertCreateSetReq) Reset() { *m = TCertCreateSetReq{} } +func (m *TCertCreateSetReq) String() string { return proto.CompactTextString(m) } +func (*TCertCreateSetReq) ProtoMessage() {} +func (*TCertCreateSetReq) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{21} } func (m *TCertCreateSetReq) GetTs() *google_protobuf.Timestamp { if m != nil { @@ -687,17 +745,19 @@ type TCertAttribute struct { AttributeName string `protobuf:"bytes,1,opt,name=attributeName" json:"attributeName,omitempty"` } -func (m *TCertAttribute) Reset() { *m = TCertAttribute{} } -func (m *TCertAttribute) String() string { return proto.CompactTextString(m) } -func (*TCertAttribute) ProtoMessage() {} +func (m *TCertAttribute) Reset() { *m = TCertAttribute{} } +func (m *TCertAttribute) String() string { return proto.CompactTextString(m) } +func (*TCertAttribute) ProtoMessage() {} +func (*TCertAttribute) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } type TCertCreateSetResp struct { Certs *CertSet `protobuf:"bytes,1,opt,name=certs" json:"certs,omitempty"` } -func (m *TCertCreateSetResp) Reset() { *m = TCertCreateSetResp{} } -func (m *TCertCreateSetResp) String() string { return proto.CompactTextString(m) } -func (*TCertCreateSetResp) ProtoMessage() {} +func (m *TCertCreateSetResp) Reset() { *m = TCertCreateSetResp{} } +func (m *TCertCreateSetResp) String() string { return proto.CompactTextString(m) } +func (*TCertCreateSetResp) ProtoMessage() {} +func (*TCertCreateSetResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } func (m *TCertCreateSetResp) GetCerts() *CertSet { if m != nil { @@ -714,9 +774,10 @@ type TCertReadSetsReq struct { Sig *Signature `protobuf:"bytes,5,opt,name=sig" json:"sig,omitempty"` } -func (m *TCertReadSetsReq) Reset() { *m = TCertReadSetsReq{} } -func (m *TCertReadSetsReq) String() string { return proto.CompactTextString(m) } -func (*TCertReadSetsReq) ProtoMessage() {} +func (m *TCertReadSetsReq) Reset() { *m = TCertReadSetsReq{} } +func (m *TCertReadSetsReq) String() string { return proto.CompactTextString(m) } +func (*TCertReadSetsReq) ProtoMessage() {} +func (*TCertReadSetsReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } func (m *TCertReadSetsReq) GetBegin() *google_protobuf.Timestamp { if m != nil { @@ -752,9 +813,10 @@ type TCertRevokeReq struct { Sig *Signature `protobuf:"bytes,3,opt,name=sig" json:"sig,omitempty"` } -func (m *TCertRevokeReq) Reset() { *m = TCertRevokeReq{} } -func (m *TCertRevokeReq) String() string { return proto.CompactTextString(m) } -func (*TCertRevokeReq) ProtoMessage() {} +func (m *TCertRevokeReq) Reset() { *m = TCertRevokeReq{} } +func (m *TCertRevokeReq) String() string { return proto.CompactTextString(m) } +func (*TCertRevokeReq) ProtoMessage() {} +func (*TCertRevokeReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } func (m *TCertRevokeReq) GetId() *Identity { if m != nil { @@ -783,9 +845,10 @@ type TCertRevokeSetReq struct { Sig *Signature `protobuf:"bytes,3,opt,name=sig" json:"sig,omitempty"` } -func (m *TCertRevokeSetReq) Reset() { *m = TCertRevokeSetReq{} } -func (m *TCertRevokeSetReq) String() string { return proto.CompactTextString(m) } -func (*TCertRevokeSetReq) ProtoMessage() {} +func (m *TCertRevokeSetReq) Reset() { *m = TCertRevokeSetReq{} } +func (m *TCertRevokeSetReq) String() string { return proto.CompactTextString(m) } +func (*TCertRevokeSetReq) ProtoMessage() {} +func (*TCertRevokeSetReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } func (m *TCertRevokeSetReq) GetId() *Identity { if m != nil { @@ -813,9 +876,10 @@ type TCertCRLReq struct { Sig *Signature `protobuf:"bytes,2,opt,name=sig" json:"sig,omitempty"` } -func (m *TCertCRLReq) Reset() { *m = TCertCRLReq{} } -func (m 
*TCertCRLReq) String() string { return proto.CompactTextString(m) } -func (*TCertCRLReq) ProtoMessage() {} +func (m *TCertCRLReq) Reset() { *m = TCertCRLReq{} } +func (m *TCertCRLReq) String() string { return proto.CompactTextString(m) } +func (*TCertCRLReq) ProtoMessage() {} +func (*TCertCRLReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } func (m *TCertCRLReq) GetId() *Identity { if m != nil { @@ -838,9 +902,10 @@ type TLSCertCreateReq struct { Sig *Signature `protobuf:"bytes,4,opt,name=sig" json:"sig,omitempty"` } -func (m *TLSCertCreateReq) Reset() { *m = TLSCertCreateReq{} } -func (m *TLSCertCreateReq) String() string { return proto.CompactTextString(m) } -func (*TLSCertCreateReq) ProtoMessage() {} +func (m *TLSCertCreateReq) Reset() { *m = TLSCertCreateReq{} } +func (m *TLSCertCreateReq) String() string { return proto.CompactTextString(m) } +func (*TLSCertCreateReq) ProtoMessage() {} +func (*TLSCertCreateReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } func (m *TLSCertCreateReq) GetTs() *google_protobuf.Timestamp { if m != nil { @@ -875,9 +940,10 @@ type TLSCertCreateResp struct { RootCert *Cert `protobuf:"bytes,2,opt,name=rootCert" json:"rootCert,omitempty"` } -func (m *TLSCertCreateResp) Reset() { *m = TLSCertCreateResp{} } -func (m *TLSCertCreateResp) String() string { return proto.CompactTextString(m) } -func (*TLSCertCreateResp) ProtoMessage() {} +func (m *TLSCertCreateResp) Reset() { *m = TLSCertCreateResp{} } +func (m *TLSCertCreateResp) String() string { return proto.CompactTextString(m) } +func (*TLSCertCreateResp) ProtoMessage() {} +func (*TLSCertCreateResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } func (m *TLSCertCreateResp) GetCert() *Cert { if m != nil { @@ -897,9 +963,10 @@ type TLSCertReadReq struct { Id *Identity `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` } -func (m *TLSCertReadReq) Reset() { *m = TLSCertReadReq{} } -func (m *TLSCertReadReq) String() string { return proto.CompactTextString(m) } -func (*TLSCertReadReq) ProtoMessage() {} +func (m *TLSCertReadReq) Reset() { *m = TLSCertReadReq{} } +func (m *TLSCertReadReq) String() string { return proto.CompactTextString(m) } +func (*TLSCertReadReq) ProtoMessage() {} +func (*TLSCertReadReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } func (m *TLSCertReadReq) GetId() *Identity { if m != nil { @@ -914,9 +981,10 @@ type TLSCertRevokeReq struct { Sig *Signature `protobuf:"bytes,3,opt,name=sig" json:"sig,omitempty"` } -func (m *TLSCertRevokeReq) Reset() { *m = TLSCertRevokeReq{} } -func (m *TLSCertRevokeReq) String() string { return proto.CompactTextString(m) } -func (*TLSCertRevokeReq) ProtoMessage() {} +func (m *TLSCertRevokeReq) Reset() { *m = TLSCertRevokeReq{} } +func (m *TLSCertRevokeReq) String() string { return proto.CompactTextString(m) } +func (*TLSCertRevokeReq) ProtoMessage() {} +func (*TLSCertRevokeReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } func (m *TLSCertRevokeReq) GetId() *Identity { if m != nil { @@ -945,9 +1013,10 @@ type Cert struct { Cert []byte `protobuf:"bytes,1,opt,name=cert,proto3" json:"cert,omitempty"` } -func (m *Cert) Reset() { *m = Cert{} } -func (m *Cert) String() string { return proto.CompactTextString(m) } -func (*Cert) ProtoMessage() {} +func (m *Cert) Reset() { *m = Cert{} } +func (m *Cert) String() string { return proto.CompactTextString(m) } +func (*Cert) ProtoMessage() {} +func (*Cert) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } // 
TCert // @@ -956,9 +1025,10 @@ type TCert struct { Prek0 []byte `protobuf:"bytes,2,opt,name=prek0,proto3" json:"prek0,omitempty"` } -func (m *TCert) Reset() { *m = TCert{} } -func (m *TCert) String() string { return proto.CompactTextString(m) } -func (*TCert) ProtoMessage() {} +func (m *TCert) Reset() { *m = TCert{} } +func (m *TCert) String() string { return proto.CompactTextString(m) } +func (*TCert) ProtoMessage() {} +func (*TCert) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } type CertSet struct { Ts *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=ts" json:"ts,omitempty"` @@ -967,9 +1037,10 @@ type CertSet struct { Certs []*TCert `protobuf:"bytes,4,rep,name=certs" json:"certs,omitempty"` } -func (m *CertSet) Reset() { *m = CertSet{} } -func (m *CertSet) String() string { return proto.CompactTextString(m) } -func (*CertSet) ProtoMessage() {} +func (m *CertSet) Reset() { *m = CertSet{} } +func (m *CertSet) String() string { return proto.CompactTextString(m) } +func (*CertSet) ProtoMessage() {} +func (*CertSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } func (m *CertSet) GetTs() *google_protobuf.Timestamp { if m != nil { @@ -996,9 +1067,10 @@ type CertSets struct { Sets []*CertSet `protobuf:"bytes,1,rep,name=sets" json:"sets,omitempty"` } -func (m *CertSets) Reset() { *m = CertSets{} } -func (m *CertSets) String() string { return proto.CompactTextString(m) } -func (*CertSets) ProtoMessage() {} +func (m *CertSets) Reset() { *m = CertSets{} } +func (m *CertSets) String() string { return proto.CompactTextString(m) } +func (*CertSets) ProtoMessage() {} +func (*CertSets) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } func (m *CertSets) GetSets() []*CertSet { if m != nil { @@ -1012,9 +1084,10 @@ type CertPair struct { Enc []byte `protobuf:"bytes,2,opt,name=enc,proto3" json:"enc,omitempty"` } -func (m *CertPair) Reset() { *m = CertPair{} } -func (m *CertPair) String() string { return proto.CompactTextString(m) } -func (*CertPair) ProtoMessage() {} +func (m *CertPair) Reset() { *m = CertPair{} } +func (m *CertPair) String() string { return proto.CompactTextString(m) } +func (*CertPair) ProtoMessage() {} +func (*CertPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } // ACAAttrReq is sent to request an ACert (attributes certificate) to the Attribute Certificate Authority (ACA). 
type ACAAttrReq struct { @@ -1030,9 +1103,10 @@ type ACAAttrReq struct { Signature *Signature `protobuf:"bytes,5,opt,name=signature" json:"signature,omitempty"` } -func (m *ACAAttrReq) Reset() { *m = ACAAttrReq{} } -func (m *ACAAttrReq) String() string { return proto.CompactTextString(m) } -func (*ACAAttrReq) ProtoMessage() {} +func (m *ACAAttrReq) Reset() { *m = ACAAttrReq{} } +func (m *ACAAttrReq) String() string { return proto.CompactTextString(m) } +func (*ACAAttrReq) ProtoMessage() {} +func (*ACAAttrReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } func (m *ACAAttrReq) GetTs() *google_protobuf.Timestamp { if m != nil { @@ -1079,9 +1153,10 @@ type ACAAttrResp struct { Signature *Signature `protobuf:"bytes,3,opt,name=signature" json:"signature,omitempty"` } -func (m *ACAAttrResp) Reset() { *m = ACAAttrResp{} } -func (m *ACAAttrResp) String() string { return proto.CompactTextString(m) } -func (*ACAAttrResp) ProtoMessage() {} +func (m *ACAAttrResp) Reset() { *m = ACAAttrResp{} } +func (m *ACAAttrResp) String() string { return proto.CompactTextString(m) } +func (*ACAAttrResp) ProtoMessage() {} +func (*ACAAttrResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } func (m *ACAAttrResp) GetCert() *Cert { if m != nil { @@ -1107,9 +1182,10 @@ type ACAFetchAttrReq struct { Signature *Signature `protobuf:"bytes,3,opt,name=signature" json:"signature,omitempty"` } -func (m *ACAFetchAttrReq) Reset() { *m = ACAFetchAttrReq{} } -func (m *ACAFetchAttrReq) String() string { return proto.CompactTextString(m) } -func (*ACAFetchAttrReq) ProtoMessage() {} +func (m *ACAFetchAttrReq) Reset() { *m = ACAFetchAttrReq{} } +func (m *ACAFetchAttrReq) String() string { return proto.CompactTextString(m) } +func (*ACAFetchAttrReq) ProtoMessage() {} +func (*ACAFetchAttrReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } func (m *ACAFetchAttrReq) GetTs() *google_protobuf.Timestamp { if m != nil { @@ -1137,24 +1213,26 @@ type ACAFetchAttrResp struct { // Status of the fetch process. Status ACAFetchAttrResp_StatusCode `protobuf:"varint,1,opt,name=status,enum=protos.ACAFetchAttrResp_StatusCode" json:"status,omitempty"` // Error message. - Msg string `protobuf:"bytes,2,opt,name=Msg" json:"Msg,omitempty"` + Msg string `protobuf:"bytes,2,opt,name=Msg,json=msg" json:"Msg,omitempty"` } -func (m *ACAFetchAttrResp) Reset() { *m = ACAFetchAttrResp{} } -func (m *ACAFetchAttrResp) String() string { return proto.CompactTextString(m) } -func (*ACAFetchAttrResp) ProtoMessage() {} +func (m *ACAFetchAttrResp) Reset() { *m = ACAFetchAttrResp{} } +func (m *ACAFetchAttrResp) String() string { return proto.CompactTextString(m) } +func (*ACAFetchAttrResp) ProtoMessage() {} +func (*ACAFetchAttrResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } // FetchAttrsResult is returned within the ECertCreateResp indicating the results of the fetch attributes invoked during enroll. type FetchAttrsResult struct { // Status of the fetch process. Status FetchAttrsResult_StatusCode `protobuf:"varint,1,opt,name=status,enum=protos.FetchAttrsResult_StatusCode" json:"status,omitempty"` // Error message. 
- Msg string `protobuf:"bytes,2,opt,name=Msg" json:"Msg,omitempty"` + Msg string `protobuf:"bytes,2,opt,name=Msg,json=msg" json:"Msg,omitempty"` } -func (m *FetchAttrsResult) Reset() { *m = FetchAttrsResult{} } -func (m *FetchAttrsResult) String() string { return proto.CompactTextString(m) } -func (*FetchAttrsResult) ProtoMessage() {} +func (m *FetchAttrsResult) Reset() { *m = FetchAttrsResult{} } +func (m *FetchAttrsResult) String() string { return proto.CompactTextString(m) } +func (*FetchAttrsResult) ProtoMessage() {} +func (*FetchAttrsResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } // ACAAttribute is an instance of an attribute with the time constraints. Is used to marshal attributes to be stored within the certificate extensions. type ACAAttribute struct { @@ -1168,9 +1246,10 @@ type ACAAttribute struct { ValidTo *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=validTo" json:"validTo,omitempty"` } -func (m *ACAAttribute) Reset() { *m = ACAAttribute{} } -func (m *ACAAttribute) String() string { return proto.CompactTextString(m) } -func (*ACAAttribute) ProtoMessage() {} +func (m *ACAAttribute) Reset() { *m = ACAAttribute{} } +func (m *ACAAttribute) String() string { return proto.CompactTextString(m) } +func (*ACAAttribute) ProtoMessage() {} +func (*ACAAttribute) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } func (m *ACAAttribute) GetValidFrom() *google_protobuf.Timestamp { if m != nil { @@ -1187,6 +1266,49 @@ func (m *ACAAttribute) GetValidTo() *google_protobuf.Timestamp { } func init() { + proto.RegisterType((*CAStatus)(nil), "protos.CAStatus") + proto.RegisterType((*Empty)(nil), "protos.Empty") + proto.RegisterType((*Identity)(nil), "protos.Identity") + proto.RegisterType((*Token)(nil), "protos.Token") + proto.RegisterType((*Hash)(nil), "protos.Hash") + proto.RegisterType((*PublicKey)(nil), "protos.PublicKey") + proto.RegisterType((*PrivateKey)(nil), "protos.PrivateKey") + proto.RegisterType((*Signature)(nil), "protos.Signature") + proto.RegisterType((*Registrar)(nil), "protos.Registrar") + proto.RegisterType((*RegisterUserReq)(nil), "protos.RegisterUserReq") + proto.RegisterType((*Attribute)(nil), "protos.Attribute") + proto.RegisterType((*ReadUserSetReq)(nil), "protos.ReadUserSetReq") + proto.RegisterType((*User)(nil), "protos.User") + proto.RegisterType((*UserSet)(nil), "protos.UserSet") + proto.RegisterType((*ECertCreateReq)(nil), "protos.ECertCreateReq") + proto.RegisterType((*ECertCreateResp)(nil), "protos.ECertCreateResp") + proto.RegisterType((*ECertReadReq)(nil), "protos.ECertReadReq") + proto.RegisterType((*ECertRevokeReq)(nil), "protos.ECertRevokeReq") + proto.RegisterType((*ECertCRLReq)(nil), "protos.ECertCRLReq") + proto.RegisterType((*TCertCreateReq)(nil), "protos.TCertCreateReq") + proto.RegisterType((*TCertCreateResp)(nil), "protos.TCertCreateResp") + proto.RegisterType((*TCertCreateSetReq)(nil), "protos.TCertCreateSetReq") + proto.RegisterType((*TCertAttribute)(nil), "protos.TCertAttribute") + proto.RegisterType((*TCertCreateSetResp)(nil), "protos.TCertCreateSetResp") + proto.RegisterType((*TCertReadSetsReq)(nil), "protos.TCertReadSetsReq") + proto.RegisterType((*TCertRevokeReq)(nil), "protos.TCertRevokeReq") + proto.RegisterType((*TCertRevokeSetReq)(nil), "protos.TCertRevokeSetReq") + proto.RegisterType((*TCertCRLReq)(nil), "protos.TCertCRLReq") + proto.RegisterType((*TLSCertCreateReq)(nil), "protos.TLSCertCreateReq") + proto.RegisterType((*TLSCertCreateResp)(nil), "protos.TLSCertCreateResp") + 
proto.RegisterType((*TLSCertReadReq)(nil), "protos.TLSCertReadReq") + proto.RegisterType((*TLSCertRevokeReq)(nil), "protos.TLSCertRevokeReq") + proto.RegisterType((*Cert)(nil), "protos.Cert") + proto.RegisterType((*TCert)(nil), "protos.TCert") + proto.RegisterType((*CertSet)(nil), "protos.CertSet") + proto.RegisterType((*CertSets)(nil), "protos.CertSets") + proto.RegisterType((*CertPair)(nil), "protos.CertPair") + proto.RegisterType((*ACAAttrReq)(nil), "protos.ACAAttrReq") + proto.RegisterType((*ACAAttrResp)(nil), "protos.ACAAttrResp") + proto.RegisterType((*ACAFetchAttrReq)(nil), "protos.ACAFetchAttrReq") + proto.RegisterType((*ACAFetchAttrResp)(nil), "protos.ACAFetchAttrResp") + proto.RegisterType((*FetchAttrsResult)(nil), "protos.FetchAttrsResult") + proto.RegisterType((*ACAAttribute)(nil), "protos.ACAAttribute") proto.RegisterEnum("protos.CryptoType", CryptoType_name, CryptoType_value) proto.RegisterEnum("protos.Role", Role_name, Role_value) proto.RegisterEnum("protos.CAStatus_StatusCode", CAStatus_StatusCode_name, CAStatus_StatusCode_value) @@ -1199,6 +1321,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + // Client API for ECAP service type ECAPClient interface { @@ -1276,64 +1402,94 @@ func RegisterECAPServer(s *grpc.Server, srv ECAPServer) { s.RegisterService(&_ECAP_serviceDesc, srv) } -func _ECAP_ReadCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ECAP_ReadCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ECAPServer).ReadCACertificate(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ECAPServer).ReadCACertificate(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ECAP/ReadCACertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ECAPServer).ReadCACertificate(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) } -func _ECAP_CreateCertificatePair_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ECAP_CreateCertificatePair_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ECertCreateReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ECAPServer).CreateCertificatePair(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ECAPServer).CreateCertificatePair(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ECAP/CreateCertificatePair", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ECAPServer).CreateCertificatePair(ctx, req.(*ECertCreateReq)) + } + return interceptor(ctx, in, info, handler) } -func _ECAP_ReadCertificatePair_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ECAP_ReadCertificatePair_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) 
{ in := new(ECertReadReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ECAPServer).ReadCertificatePair(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ECAPServer).ReadCertificatePair(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ECAP/ReadCertificatePair", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ECAPServer).ReadCertificatePair(ctx, req.(*ECertReadReq)) + } + return interceptor(ctx, in, info, handler) } -func _ECAP_ReadCertificateByHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ECAP_ReadCertificateByHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Hash) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ECAPServer).ReadCertificateByHash(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ECAPServer).ReadCertificateByHash(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ECAP/ReadCertificateByHash", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ECAPServer).ReadCertificateByHash(ctx, req.(*Hash)) + } + return interceptor(ctx, in, info, handler) } -func _ECAP_RevokeCertificatePair_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ECAP_RevokeCertificatePair_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ECertRevokeReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ECAPServer).RevokeCertificatePair(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ECAPServer).RevokeCertificatePair(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ECAP/RevokeCertificatePair", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ECAPServer).RevokeCertificatePair(ctx, req.(*ECertRevokeReq)) + } + return interceptor(ctx, in, info, handler) } var _ECAP_serviceDesc = grpc.ServiceDesc{ @@ -1361,7 +1517,8 @@ var _ECAP_serviceDesc = grpc.ServiceDesc{ Handler: _ECAP_RevokeCertificatePair_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor0, } // Client API for ECAA service @@ -1430,52 +1587,76 @@ func RegisterECAAServer(s *grpc.Server, srv ECAAServer) { s.RegisterService(&_ECAA_serviceDesc, srv) } -func _ECAA_RegisterUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ECAA_RegisterUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RegisterUserReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ECAAServer).RegisterUser(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ECAAServer).RegisterUser(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ECAA/RegisterUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ECAAServer).RegisterUser(ctx, req.(*RegisterUserReq)) + } + return 
interceptor(ctx, in, info, handler) } -func _ECAA_ReadUserSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ECAA_ReadUserSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReadUserSetReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ECAAServer).ReadUserSet(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ECAAServer).ReadUserSet(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ECAA/ReadUserSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ECAAServer).ReadUserSet(ctx, req.(*ReadUserSetReq)) + } + return interceptor(ctx, in, info, handler) } -func _ECAA_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ECAA_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ECertRevokeReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ECAAServer).RevokeCertificate(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ECAAServer).RevokeCertificate(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ECAA/RevokeCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ECAAServer).RevokeCertificate(ctx, req.(*ECertRevokeReq)) + } + return interceptor(ctx, in, info, handler) } -func _ECAA_PublishCRL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ECAA_PublishCRL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ECertCRLReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ECAAServer).PublishCRL(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ECAAServer).PublishCRL(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ECAA/PublishCRL", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ECAAServer).PublishCRL(ctx, req.(*ECertCRLReq)) + } + return interceptor(ctx, in, info, handler) } var _ECAA_serviceDesc = grpc.ServiceDesc{ @@ -1499,7 +1680,8 @@ var _ECAA_serviceDesc = grpc.ServiceDesc{ Handler: _ECAA_PublishCRL_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor0, } // Client API for TCAP service @@ -1568,52 +1750,76 @@ func RegisterTCAPServer(s *grpc.Server, srv TCAPServer) { s.RegisterService(&_TCAP_serviceDesc, srv) } -func _TCAP_ReadCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TCAP_ReadCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TCAPServer).ReadCACertificate(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TCAPServer).ReadCACertificate(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: "/protos.TCAP/ReadCACertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TCAPServer).ReadCACertificate(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) } -func _TCAP_CreateCertificateSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TCAP_CreateCertificateSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TCertCreateSetReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TCAPServer).CreateCertificateSet(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TCAPServer).CreateCertificateSet(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.TCAP/CreateCertificateSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TCAPServer).CreateCertificateSet(ctx, req.(*TCertCreateSetReq)) + } + return interceptor(ctx, in, info, handler) } -func _TCAP_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TCAP_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TCertRevokeReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TCAPServer).RevokeCertificate(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TCAPServer).RevokeCertificate(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.TCAP/RevokeCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TCAPServer).RevokeCertificate(ctx, req.(*TCertRevokeReq)) + } + return interceptor(ctx, in, info, handler) } -func _TCAP_RevokeCertificateSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TCAP_RevokeCertificateSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TCertRevokeSetReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TCAPServer).RevokeCertificateSet(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TCAPServer).RevokeCertificateSet(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.TCAP/RevokeCertificateSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TCAPServer).RevokeCertificateSet(ctx, req.(*TCertRevokeSetReq)) + } + return interceptor(ctx, in, info, handler) } var _TCAP_serviceDesc = grpc.ServiceDesc{ @@ -1637,7 +1843,8 @@ var _TCAP_serviceDesc = grpc.ServiceDesc{ Handler: _TCAP_RevokeCertificateSet_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor0, } // Client API for TCAA service @@ -1695,40 +1902,58 @@ func RegisterTCAAServer(s *grpc.Server, srv TCAAServer) { s.RegisterService(&_TCAA_serviceDesc, srv) } -func _TCAA_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TCAA_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TCertRevokeReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TCAAServer).RevokeCertificate(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TCAAServer).RevokeCertificate(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.TCAA/RevokeCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TCAAServer).RevokeCertificate(ctx, req.(*TCertRevokeReq)) + } + return interceptor(ctx, in, info, handler) } -func _TCAA_RevokeCertificateSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TCAA_RevokeCertificateSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TCertRevokeSetReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TCAAServer).RevokeCertificateSet(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TCAAServer).RevokeCertificateSet(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.TCAA/RevokeCertificateSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TCAAServer).RevokeCertificateSet(ctx, req.(*TCertRevokeSetReq)) + } + return interceptor(ctx, in, info, handler) } -func _TCAA_PublishCRL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TCAA_PublishCRL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TCertCRLReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TCAAServer).PublishCRL(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TCAAServer).PublishCRL(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.TCAA/PublishCRL", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TCAAServer).PublishCRL(ctx, req.(*TCertCRLReq)) + } + return interceptor(ctx, in, info, handler) } var _TCAA_serviceDesc = grpc.ServiceDesc{ @@ -1748,7 +1973,8 @@ var _TCAA_serviceDesc = grpc.ServiceDesc{ Handler: _TCAA_PublishCRL_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor0, } // Client API for TLSCAP service @@ -1817,52 +2043,76 @@ func RegisterTLSCAPServer(s *grpc.Server, srv TLSCAPServer) { s.RegisterService(&_TLSCAP_serviceDesc, srv) } -func _TLSCAP_ReadCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TLSCAP_ReadCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TLSCAPServer).ReadCACertificate(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TLSCAPServer).ReadCACertificate(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.TLSCAP/ReadCACertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TLSCAPServer).ReadCACertificate(ctx, 
req.(*Empty)) + } + return interceptor(ctx, in, info, handler) } -func _TLSCAP_CreateCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TLSCAP_CreateCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TLSCertCreateReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TLSCAPServer).CreateCertificate(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TLSCAPServer).CreateCertificate(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.TLSCAP/CreateCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TLSCAPServer).CreateCertificate(ctx, req.(*TLSCertCreateReq)) + } + return interceptor(ctx, in, info, handler) } -func _TLSCAP_ReadCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TLSCAP_ReadCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TLSCertReadReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TLSCAPServer).ReadCertificate(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TLSCAPServer).ReadCertificate(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.TLSCAP/ReadCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TLSCAPServer).ReadCertificate(ctx, req.(*TLSCertReadReq)) + } + return interceptor(ctx, in, info, handler) } -func _TLSCAP_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TLSCAP_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TLSCertRevokeReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TLSCAPServer).RevokeCertificate(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TLSCAPServer).RevokeCertificate(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.TLSCAP/RevokeCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TLSCAPServer).RevokeCertificate(ctx, req.(*TLSCertRevokeReq)) + } + return interceptor(ctx, in, info, handler) } var _TLSCAP_serviceDesc = grpc.ServiceDesc{ @@ -1886,7 +2136,8 @@ var _TLSCAP_serviceDesc = grpc.ServiceDesc{ Handler: _TLSCAP_RevokeCertificate_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor0, } // Client API for TLSCAA service @@ -1922,16 +2173,22 @@ func RegisterTLSCAAServer(s *grpc.Server, srv TLSCAAServer) { s.RegisterService(&_TLSCAA_serviceDesc, srv) } -func _TLSCAA_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TLSCAA_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TLSCertRevokeReq) if err := dec(in); err != nil { return nil, err } - out, err := 
srv.(TLSCAAServer).RevokeCertificate(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TLSCAAServer).RevokeCertificate(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.TLSCAA/RevokeCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TLSCAAServer).RevokeCertificate(ctx, req.(*TLSCertRevokeReq)) + } + return interceptor(ctx, in, info, handler) } var _TLSCAA_serviceDesc = grpc.ServiceDesc{ @@ -1943,7 +2200,8 @@ var _TLSCAA_serviceDesc = grpc.ServiceDesc{ Handler: _TLSCAA_RevokeCertificate_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor0, } // Client API for ACAP service @@ -2001,40 +2259,58 @@ func RegisterACAPServer(s *grpc.Server, srv ACAPServer) { s.RegisterService(&_ACAP_serviceDesc, srv) } -func _ACAP_ReadCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ACAP_ReadCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ACAPServer).ReadCACertificate(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ACAPServer).ReadCACertificate(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ACAP/ReadCACertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ACAPServer).ReadCACertificate(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) } -func _ACAP_RequestAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ACAP_RequestAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ACAAttrReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ACAPServer).RequestAttributes(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ACAPServer).RequestAttributes(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ACAP/RequestAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ACAPServer).RequestAttributes(ctx, req.(*ACAAttrReq)) + } + return interceptor(ctx, in, info, handler) } -func _ACAP_FetchAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ACAP_FetchAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ACAFetchAttrReq) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ACAPServer).FetchAttributes(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ACAPServer).FetchAttributes(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.ACAP/FetchAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ACAPServer).FetchAttributes(ctx, req.(*ACAFetchAttrReq)) + } + return interceptor(ctx, in, info, handler) } var _ACAP_serviceDesc = grpc.ServiceDesc{ @@ -2054,5 +2330,130 @@ var 
_ACAP_serviceDesc = grpc.ServiceDesc{ Handler: _ACAP_FetchAttributes_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("ca.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1881 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x59, 0x5f, 0x6f, 0xeb, 0x48, + 0x15, 0x5f, 0xc7, 0x49, 0x9b, 0x9c, 0xa4, 0xa9, 0x3b, 0xed, 0x6e, 0x73, 0x03, 0x62, 0xaf, 0xbc, + 0xec, 0xb2, 0x5c, 0x41, 0x7b, 0xb7, 0x45, 0x17, 0xc4, 0xb2, 0x42, 0x6e, 0xea, 0xb2, 0xe1, 0xa6, + 0x49, 0x99, 0x38, 0x0b, 0x6f, 0x51, 0xda, 0xba, 0xa9, 0xd5, 0x34, 0x4e, 0x6d, 0xa7, 0x52, 0xb4, + 0x2f, 0x3c, 0x21, 0x01, 0x12, 0x0f, 0x7c, 0x07, 0x24, 0xc4, 0x37, 0x40, 0x42, 0xe2, 0x95, 0x3f, + 0xbb, 0x12, 0x2f, 0x3c, 0xf2, 0x8c, 0xc4, 0x13, 0x9f, 0x80, 0xcb, 0x99, 0xf1, 0xd8, 0xb1, 0x9d, + 0x3f, 0xf5, 0xed, 0x2d, 0xba, 0xec, 0x43, 0x95, 0x99, 0x39, 0x67, 0x66, 0xce, 0xf9, 0x9d, 0x9f, + 0xcf, 0x9c, 0x99, 0x42, 0xfe, 0xac, 0xb7, 0x33, 0x72, 0x6c, 0xcf, 0x26, 0x2b, 0xfc, 0xc7, 0xad, + 0xbe, 0xdd, 0xb7, 0xed, 0xfe, 0xc0, 0xdc, 0xe5, 0xdd, 0xd3, 0xf1, 0xc5, 0xae, 0x67, 0x5d, 0x9b, + 0xae, 0xd7, 0xbb, 0x1e, 0xf9, 0x8a, 0xea, 0x25, 0xe4, 0x6b, 0x5a, 0xdb, 0xeb, 0x79, 0x63, 0x97, + 0xec, 0xc3, 0x8a, 0xcb, 0x5b, 0x15, 0xe9, 0xb1, 0xf4, 0x7e, 0x79, 0xef, 0x4b, 0xbe, 0x8e, 0xbb, + 0x13, 0x68, 0xec, 0xf8, 0x3f, 0x35, 0xfb, 0xdc, 0xa4, 0x42, 0x55, 0xfd, 0x1a, 0xc0, 0x74, 0x94, + 0xac, 0x40, 0xa6, 0xf5, 0x5c, 0x79, 0x83, 0x6c, 0xc0, 0x5a, 0xa7, 0xf9, 0xbc, 0xd9, 0xfa, 0x71, + 0xb3, 0xab, 0x53, 0xda, 0xa2, 0x8a, 0xa4, 0xae, 0x42, 0x4e, 0xbf, 0x1e, 0x79, 0x13, 0xb5, 0x0a, + 0xf9, 0xfa, 0xb9, 0x39, 0xf4, 0x2c, 0x6f, 0x42, 0xca, 0x90, 0xb1, 0xce, 0xf9, 0x76, 0x05, 0x8a, + 0x2d, 0xf5, 0x11, 0xe4, 0x0c, 0xfb, 0xca, 0x1c, 0x12, 0x05, 0x64, 0xcf, 0xbe, 0xe2, 0x92, 0x12, + 0x65, 0x4d, 0x9c, 0x96, 0xfd, 0xb8, 0xe7, 0x5e, 0x12, 0x02, 0xd9, 0x4b, 0xfc, 0x15, 0x22, 0xde, + 0x56, 0x75, 0x28, 0x9c, 0x8c, 0x4f, 0x07, 0xd6, 0xd9, 0x73, 0x73, 0x42, 0xde, 0x83, 0xac, 0x37, + 0x19, 0x99, 0xc2, 0x09, 0x12, 0x3a, 0xe1, 0x4c, 0x46, 0x9e, 0x6d, 0xa0, 0x84, 0x72, 0x39, 0xdb, + 0xe2, 0xca, 0x9c, 0x54, 0x32, 0xfe, 0x16, 0xd8, 0x54, 0x8f, 0x00, 0x4e, 0x1c, 0xeb, 0xb6, 0xe7, + 0x99, 0xaf, 0xb6, 0x4e, 0x0b, 0x0a, 0x6d, 0xab, 0x3f, 0x44, 0x54, 0x1c, 0x33, 0xf5, 0x32, 0x25, + 0x90, 0x1c, 0xb1, 0x88, 0xe4, 0xb0, 0x9e, 0x5b, 0x91, 0xfd, 0x9e, 0xab, 0x5a, 0x50, 0xa0, 0x66, + 0xdf, 0x72, 0x3d, 0xa7, 0xe7, 0x90, 0xc7, 0x21, 0x66, 0xc5, 0x3d, 0x25, 0x58, 0x2e, 0x40, 0x94, + 0xa1, 0x48, 0xb6, 0x20, 0xe7, 0xd8, 0x03, 0xd3, 0xc5, 0xe5, 0x64, 0x04, 0xd6, 0xef, 0x90, 0xaf, + 0xc2, 0xda, 0xb9, 0x39, 0x30, 0xfb, 0xe8, 0x1e, 0xe5, 0x52, 0x99, 0x4b, 0xe3, 0x83, 0xea, 0x4f, + 0x33, 0xb0, 0xee, 0xef, 0x65, 0x3a, 0x1d, 0xd7, 0x74, 0xa8, 0x79, 0x93, 0x62, 0xc7, 0xc7, 0x90, + 0x65, 0x9b, 0x70, 0xfb, 0xcb, 0x7b, 0xa5, 0x40, 0x87, 0x2d, 0x49, 0xb9, 0x84, 0x7c, 0x00, 0xd0, + 0xf3, 0x3c, 0xc7, 0x3a, 0x1d, 0x7b, 0x62, 0xeb, 0xe2, 0xde, 0x46, 0xa0, 0xa7, 0x05, 0x12, 0x1a, + 0x51, 0xc2, 0x45, 0x8b, 0xbd, 0x8b, 0x0b, 0x6b, 0x60, 0xf5, 0x3c, 0xcb, 0x1e, 0x56, 0xb2, 0x9c, + 0x25, 0xd1, 0x21, 0xb2, 0x0b, 0x05, 0x27, 0xc0, 0xa5, 0x92, 0xe3, 0xf6, 0x85, 0x6b, 0x86, 0x80, + 0xd1, 0xa9, 0x0e, 0x79, 0x07, 0x64, 0xd7, 0xea, 0x57, 0x56, 0xe2, 0xaa, 0x61, 0xb0, 0x28, 0x93, + 0xaa, 0x36, 0x14, 0x42, 0x83, 0x18, 0xdd, 0x86, 0xbd, 0x6b, 0x53, 0x70, 0x94, 0xb7, 0x19, 0xbe, + 0xb7, 0xbd, 0xc1, 0xd8, 0x77, 0x17, 0xf1, 0xe5, 0x1d, 0xf2, 0x65, 0x28, 0x0c, 0x6d, 
0xef, 0xc0, + 0xbc, 0xb0, 0x1d, 0x93, 0x87, 0xae, 0x40, 0xa7, 0x03, 0x04, 0x59, 0x8f, 0x1d, 0xed, 0x02, 0x61, + 0x15, 0x9e, 0x84, 0x7d, 0xf5, 0x53, 0x28, 0x53, 0xb3, 0x77, 0xce, 0xe0, 0x6e, 0x9b, 0x1e, 0x43, + 0x5c, 0x05, 0xd9, 0x31, 0x6f, 0x16, 0x42, 0xce, 0x84, 0x29, 0x30, 0x17, 0xde, 0xca, 0x4b, 0xbd, + 0xfd, 0x21, 0x64, 0xd9, 0xc6, 0x0f, 0x11, 0x64, 0xf5, 0x9b, 0xb0, 0x2a, 0x9c, 0x40, 0x0f, 0x72, + 0x63, 0x6c, 0xb2, 0x5c, 0xc2, 0x42, 0x1d, 0x6a, 0x73, 0x4e, 0xf9, 0x22, 0xf5, 0xdf, 0x12, 0x94, + 0xf5, 0x9a, 0xe9, 0x78, 0x35, 0xc7, 0x64, 0x04, 0x44, 0xa7, 0x9e, 0x40, 0xc6, 0x73, 0x85, 0x15, + 0xd5, 0x1d, 0x3f, 0x7b, 0xed, 0x04, 0xd9, 0x6b, 0xc7, 0x08, 0xb2, 0x17, 0x45, 0x2d, 0x61, 0x71, + 0x66, 0x89, 0xc5, 0x6f, 0xfb, 0x59, 0xc4, 0x07, 0x60, 0x2d, 0x50, 0xe1, 0x19, 0x86, 0x27, 0x15, + 0xf2, 0x2e, 0x64, 0x11, 0x03, 0x9f, 0x5b, 0x11, 0x88, 0xc2, 0x64, 0x42, 0xb9, 0x98, 0x01, 0x69, + 0x0e, 0xcf, 0x92, 0x0c, 0x9b, 0x6a, 0x31, 0x69, 0x3a, 0x6e, 0xfd, 0x5d, 0x82, 0xf5, 0x98, 0xcb, + 0xee, 0x08, 0x33, 0x44, 0xee, 0x0c, 0x47, 0xdc, 0x24, 0xf8, 0x4c, 0xed, 0xa4, 0x67, 0x21, 0x5c, + 0x5c, 0x8c, 0x1b, 0xe4, 0xce, 0x2e, 0x7b, 0xd6, 0x50, 0xb8, 0x9c, 0xf0, 0xc7, 0x97, 0x91, 0x0a, + 0xac, 0x8e, 0xae, 0x7c, 0xb5, 0x1c, 0x4f, 0x1f, 0x41, 0xf7, 0x6e, 0x30, 0xbe, 0x0b, 0xc5, 0x0b, + 0xd3, 0x3b, 0xbb, 0x44, 0xa3, 0xc6, 0x03, 0x4f, 0x60, 0x52, 0x09, 0x14, 0x8f, 0x98, 0x88, 0x7d, + 0x17, 0xae, 0x2f, 0xa7, 0x51, 0x65, 0xf5, 0x29, 0x94, 0xb8, 0x5b, 0x8c, 0xc7, 0xa9, 0x52, 0x86, + 0x3a, 0x11, 0xb1, 0xa7, 0xe6, 0x2d, 0x9a, 0x90, 0x3a, 0xcd, 0x30, 0x28, 0x04, 0x00, 0xa5, 0x28, + 0x50, 0x94, 0x4b, 0xd2, 0x51, 0xde, 0x80, 0xa2, 0x1f, 0x03, 0xda, 0x48, 0xb7, 0xaf, 0x58, 0x35, + 0xb3, 0x74, 0xd5, 0xdf, 0x22, 0x9b, 0x8d, 0xff, 0x25, 0x9b, 0xd1, 0x8a, 0xd1, 0xf8, 0x34, 0xe9, + 0x5b, 0x84, 0x85, 0x28, 0x0d, 0x4c, 0xcd, 0x2e, 0x35, 0x75, 0x1f, 0xd6, 0x8d, 0x04, 0x09, 0x03, + 0x68, 0xa5, 0x45, 0xd0, 0xaa, 0x7f, 0x93, 0x60, 0x23, 0x32, 0x4b, 0x64, 0xaa, 0x87, 0x75, 0x11, + 0xcf, 0xd2, 0xe1, 0xf8, 0x9a, 0xbb, 0xb8, 0x46, 0x59, 0x93, 0x3c, 0x8b, 0x9d, 0x1b, 0x59, 0x9e, + 0x4c, 0xde, 0x0a, 0xc9, 0xcb, 0xcc, 0x99, 0x7f, 0x78, 0x08, 0x1c, 0x72, 0x4b, 0x71, 0x78, 0x26, + 0x22, 0x36, 0x4d, 0xf7, 0x78, 0x48, 0x86, 0x8b, 0x34, 0xa7, 0x79, 0x3f, 0x3e, 0xa8, 0x7e, 0x08, + 0x24, 0x89, 0x04, 0x42, 0xf8, 0x6e, 0xfc, 0x3b, 0x5e, 0x8f, 0x62, 0xc8, 0x74, 0x7c, 0xa9, 0xfa, + 0x0f, 0x09, 0x14, 0x23, 0xf8, 0x56, 0x70, 0xdc, 0x65, 0x30, 0x3e, 0x85, 0xdc, 0x29, 0x9e, 0x52, + 0xc3, 0x14, 0x48, 0xfa, 0x8a, 0xe4, 0x1b, 0x2c, 0x27, 0x05, 0x68, 0x2e, 0xd3, 0x67, 0x6a, 0xc1, + 0x81, 0x22, 0xa7, 0x39, 0x50, 0xb2, 0x77, 0x1d, 0x28, 0xcb, 0x41, 0x9d, 0x08, 0x50, 0x5f, 0xc3, + 0x87, 0xfd, 0xb3, 0x80, 0xa2, 0xfe, 0xde, 0x82, 0xa2, 0x77, 0x6f, 0xef, 0x93, 0x38, 0x93, 0x8a, + 0xc4, 0x69, 0x33, 0x8c, 0xf1, 0xf0, 0x19, 0xe6, 0x77, 0x8c, 0x39, 0x8d, 0xf6, 0x17, 0x23, 0xc7, + 0x74, 0x31, 0x14, 0x71, 0x5b, 0xd3, 0x64, 0x19, 0xf2, 0x3e, 0xe4, 0x1d, 0xdb, 0xf6, 0x6a, 0x8b, + 0xd8, 0x10, 0x4a, 0xd5, 0x3d, 0xe4, 0x99, 0xbf, 0x41, 0xfa, 0x43, 0xe7, 0xd3, 0x10, 0xc0, 0xd7, + 0xc0, 0x4e, 0xbc, 0xc1, 0xb0, 0x29, 0xac, 0xa4, 0x0c, 0x41, 0x28, 0x89, 0xe4, 0xfa, 0x01, 0x5e, + 0x7c, 0x16, 0x09, 0x59, 0xbd, 0x39, 0x72, 0xcc, 0xab, 0xa7, 0xe2, 0x7a, 0xe0, 0x77, 0xd4, 0x5f, + 0x49, 0xb0, 0x2a, 0x52, 0xcb, 0xc3, 0x67, 0x61, 0x76, 0xa3, 0x91, 0xc3, 0x1b, 0x0d, 0x2f, 0x3d, + 0x78, 0x6a, 0xf3, 0x13, 0xf0, 0x5a, 0x2c, 0x01, 0x07, 0x89, 0x6d, 0x17, 0xef, 0x92, 0xbe, 0x3d, + 0xec, 0x2b, 0xc9, 0xba, 0xa6, 0x17, 0x54, 0x7f, 0x33, 0xa9, 0x90, 0x0b, 0xb1, 0x68, 0xc8, 0x07, + 0x35, 0x0e, 
0xf3, 0x9b, 0x57, 0x62, 0xc2, 0x6f, 0x5e, 0x76, 0x29, 0x7e, 0xd9, 0x25, 0x6e, 0x56, + 0xd8, 0x54, 0xff, 0x29, 0x01, 0x68, 0x35, 0x8d, 0xe5, 0xeb, 0x87, 0xe7, 0x3e, 0x96, 0xac, 0x26, + 0xe7, 0x9d, 0x3c, 0x27, 0xce, 0xbe, 0xe8, 0xde, 0xc7, 0x11, 0xde, 0x54, 0xdc, 0x80, 0x0d, 0x8b, + 0xf3, 0xe7, 0x54, 0x47, 0xfd, 0x8d, 0x0c, 0xc5, 0xd0, 0x53, 0xfc, 0x72, 0x9e, 0x25, 0x2e, 0xe7, + 0x5f, 0x09, 0xef, 0x4e, 0x53, 0xa5, 0x39, 0xf7, 0xf3, 0x14, 0xdc, 0x8d, 0x99, 0x26, 0xa7, 0x30, + 0xed, 0x17, 0x99, 0xd8, 0x9d, 0x7f, 0x13, 0xd6, 0x8f, 0x3a, 0x8d, 0x46, 0xb7, 0xdd, 0xa9, 0xd5, + 0xf4, 0x76, 0x1b, 0xdb, 0xca, 0x1b, 0xe4, 0x2d, 0x20, 0x27, 0x1a, 0x35, 0xea, 0x5a, 0x6c, 0x5c, + 0x22, 0xdb, 0xb0, 0xd9, 0x6c, 0x75, 0x35, 0xc3, 0xa0, 0xf5, 0x83, 0x8e, 0xa1, 0xb7, 0xbb, 0x47, + 0xad, 0x4e, 0xf3, 0x50, 0xc9, 0x63, 0xfc, 0xcb, 0x47, 0x5a, 0xbd, 0xd1, 0xa1, 0x7a, 0xf7, 0xb8, + 0xde, 0xfc, 0x44, 0x6b, 0x28, 0xe7, 0xa4, 0x08, 0xab, 0x62, 0x4c, 0x61, 0xa4, 0x2c, 0x1e, 0x68, + 0x87, 0x5d, 0xaa, 0xff, 0xa8, 0xa3, 0xb7, 0x0d, 0xe5, 0x4f, 0x12, 0x1b, 0x61, 0xe2, 0x6e, 0x13, + 0xff, 0x8c, 0xb6, 0xf2, 0xe7, 0xf8, 0x48, 0xfd, 0x50, 0xf9, 0x8b, 0x84, 0xc6, 0x95, 0xc3, 0x11, + 0xbd, 0xa6, 0x53, 0x43, 0xf9, 0x2b, 0x33, 0x82, 0x84, 0x83, 0xed, 0xfa, 0x0f, 0x9a, 0x9a, 0xc1, + 0xb6, 0xf8, 0x4c, 0xc2, 0xe2, 0x79, 0x33, 0x14, 0x4c, 0x6d, 0x54, 0x3e, 0x0f, 0xd7, 0xe1, 0xe6, + 0x69, 0x3f, 0x61, 0xe6, 0x7d, 0x2e, 0x55, 0x33, 0x8a, 0xa4, 0xfe, 0x1a, 0x0b, 0x7a, 0x0c, 0x41, + 0x58, 0x1d, 0xbf, 0x2c, 0x2d, 0x43, 0xd2, 0x65, 0x16, 0x93, 0xee, 0xa5, 0x23, 0xf4, 0x73, 0x3c, + 0x28, 0xe2, 0x46, 0x21, 0x83, 0x3e, 0x4c, 0x30, 0xe8, 0x9d, 0x08, 0x83, 0x62, 0x9a, 0xf3, 0x68, + 0x84, 0x9f, 0xe2, 0xb1, 0xdb, 0x17, 0x17, 0x5e, 0xf9, 0xda, 0xed, 0xab, 0xef, 0xc5, 0x48, 0x80, + 0xa1, 0x12, 0x71, 0xc6, 0xe0, 0x47, 0xe3, 0xc6, 0x6d, 0x49, 0xde, 0x1d, 0x16, 0xdb, 0x92, 0xd4, + 0x7c, 0x58, 0x5b, 0x3e, 0x93, 0xa0, 0x24, 0xbe, 0x97, 0x97, 0x28, 0xf7, 0xf0, 0x82, 0x56, 0x0e, + 0x07, 0x3e, 0x09, 0x2f, 0xfe, 0x25, 0x9a, 0x18, 0x25, 0xdf, 0x81, 0xc2, 0x6d, 0x6f, 0x60, 0x9d, + 0x1f, 0x39, 0xf6, 0xb5, 0x88, 0xd3, 0xb2, 0xf0, 0x4f, 0x95, 0xc9, 0xb7, 0x60, 0x95, 0x77, 0x0c, + 0x5b, 0x9c, 0xaa, 0xcb, 0xe6, 0x05, 0xaa, 0x4f, 0xbe, 0x0e, 0x30, 0x7d, 0x46, 0x22, 0x05, 0xc8, + 0xe9, 0xb5, 0xc3, 0xb6, 0x86, 0x4e, 0xaf, 0x82, 0x4c, 0xb1, 0x21, 0xb1, 0x06, 0x1b, 0xc9, 0x3c, + 0x39, 0x86, 0x2c, 0xab, 0xe3, 0x48, 0x1e, 0xb2, 0xcd, 0x56, 0x53, 0x47, 0x1d, 0x80, 0x95, 0x5a, + 0xa3, 0xae, 0x37, 0x0d, 0x54, 0xc3, 0xd1, 0x13, 0x5d, 0xa7, 0x4a, 0x86, 0xac, 0x41, 0x01, 0xc9, + 0x5d, 0x3f, 0xd4, 0x8c, 0x16, 0x55, 0xb2, 0x0c, 0x3d, 0xad, 0x73, 0x58, 0x67, 0x9d, 0x3c, 0x6e, + 0x20, 0x6b, 0x8d, 0x86, 0xf2, 0xe2, 0x85, 0xbc, 0xf7, 0xfb, 0x0c, 0x64, 0xf5, 0x9a, 0x76, 0x82, + 0x75, 0xeb, 0x06, 0x3b, 0x7d, 0x6b, 0x1a, 0x23, 0xaa, 0x75, 0x61, 0x9d, 0xe1, 0x49, 0x4f, 0xc2, + 0xe3, 0x81, 0x3f, 0xf8, 0x55, 0x63, 0x9c, 0x26, 0x1f, 0xc3, 0x9b, 0x7e, 0x41, 0x10, 0x99, 0xc1, + 0x4f, 0x80, 0x30, 0x8d, 0xc6, 0x9f, 0x04, 0xaa, 0xdb, 0x73, 0xc7, 0x91, 0xd0, 0x1f, 0xc1, 0x26, + 0xdf, 0x3b, 0xb1, 0xce, 0x56, 0x4c, 0x5f, 0xd4, 0x06, 0xd5, 0x99, 0x5b, 0x35, 0xd9, 0x87, 0x37, + 0x13, 0xd3, 0x0f, 0x26, 0xfc, 0x85, 0x31, 0xb4, 0x97, 0xf5, 0x12, 0xd6, 0x6b, 0x6c, 0x12, 0xab, + 0x1c, 0x96, 0x5b, 0x1f, 0x56, 0x17, 0x91, 0x7d, 0xc5, 0x23, 0xea, 0xde, 0xbf, 0x24, 0x8e, 0x9d, + 0x86, 0x29, 0xbd, 0x14, 0x7d, 0x69, 0x23, 0xdb, 0xf1, 0xa7, 0xab, 0xf0, 0xfd, 0xad, 0x1a, 0xbf, + 0xac, 0xe3, 0xbc, 0x62, 0xe4, 0xb9, 0x68, 0xba, 0x73, 0xfc, 0x0d, 0xa9, 0xba, 0x1e, 0x7d, 0x72, + 0x61, 0x8a, 0x1f, 0xb1, 0x58, 0x25, 
0x6c, 0x4f, 0x6f, 0x37, 0xe2, 0x05, 0xbc, 0x0e, 0x74, 0x2f, + 0xb1, 0xaa, 0x25, 0x9b, 0xf1, 0xa8, 0xf0, 0x3a, 0x77, 0x8e, 0xb3, 0xbf, 0x44, 0xa2, 0x18, 0xf7, + 0x23, 0xca, 0x31, 0x6c, 0xcd, 0x10, 0x85, 0xb9, 0xf1, 0x28, 0x76, 0xdc, 0x46, 0x2f, 0xa3, 0xd5, + 0xea, 0x22, 0x11, 0x67, 0xcb, 0x32, 0xef, 0x8d, 0xbb, 0xbc, 0xaf, 0xc1, 0xd6, 0xcc, 0xf4, 0x59, + 0x6b, 0xa2, 0xf7, 0x8e, 0x39, 0x68, 0xfc, 0x51, 0xe2, 0x68, 0x68, 0xff, 0x0f, 0xc6, 0x2c, 0x8a, + 0xa7, 0xb1, 0x34, 0x9e, 0xff, 0x91, 0x60, 0x85, 0x55, 0xd0, 0xf7, 0xfc, 0xf4, 0x37, 0x66, 0x22, + 0x4a, 0xc2, 0x07, 0xa6, 0xe4, 0xcd, 0xa6, 0xfa, 0x68, 0x81, 0x04, 0x83, 0xf9, 0x6d, 0xf6, 0x48, + 0x1d, 0xfb, 0x76, 0x23, 0xe8, 0xc5, 0x2e, 0x05, 0x09, 0x13, 0xbe, 0x3f, 0x0f, 0xf8, 0xca, 0xcc, + 0xd4, 0xc5, 0x5f, 0x6f, 0x5d, 0xf8, 0xaf, 0xbd, 0xfa, 0x52, 0x7f, 0x40, 0x36, 0x68, 0xf7, 0x43, + 0xf2, 0x7b, 0x6c, 0xc6, 0xcd, 0x18, 0x0f, 0x04, 0x6d, 0x5a, 0x63, 0x92, 0x99, 0x92, 0xf0, 0xa6, + 0xba, 0x39, 0xa7, 0x4c, 0x24, 0x87, 0x58, 0xb1, 0x05, 0xe7, 0xac, 0x98, 0xbb, 0x3d, 0xbf, 0x18, + 0xb8, 0xa9, 0x56, 0x16, 0x55, 0x09, 0xa7, 0xfe, 0xff, 0x98, 0xf6, 0xff, 0x1b, 0x00, 0x00, 0xff, + 0xff, 0x84, 0xb1, 0xb8, 0x2a, 0x76, 0x1a, 0x00, 0x00, } diff --git a/membersrvc/protos/ca.proto b/membersrvc/protos/ca.proto index a7a0feb6927..7690e62f314 100644 --- a/membersrvc/protos/ca.proto +++ b/membersrvc/protos/ca.proto @@ -139,19 +139,27 @@ enum Role { } message Registrar { - Identity id = 1; // The identity of the registrar - repeated string roles = 2; // Roles that the registrar can register - repeated string delegateRoles = 3; // Roles that the registrar can give to another to register + Identity id = 1; // The identity of the registrar + repeated string roles = 2; // Roles that the registrar can register + repeated string delegateRoles = 3; // Roles that the registrar can give to another to register } message RegisterUserReq { Identity id = 1; Role role = 2; - string affiliation = 4; // Skipping field number 3 to maintain backward compatibility. It was used before for the primary account. + repeated Attribute attributes = 3; + string affiliation = 4; Registrar registrar = 5; Signature sig = 6; } +message Attribute { + string name = 1; + string value = 2; + string notBefore = 3; + string notAfter = 4; +} + message ReadUserSetReq { Identity req = 1; Role role = 2; // bitmask diff --git a/membersrvc/server.go b/membersrvc/server.go index d1fddc9bb30..763d90f4c8a 100644 --- a/membersrvc/server.go +++ b/membersrvc/server.go @@ -27,6 +27,7 @@ import ( "github.com/hyperledger/fabric/core/crypto" "github.com/hyperledger/fabric/flogging" "github.com/hyperledger/fabric/membersrvc/ca" + "github.com/hyperledger/fabric/metadata" "github.com/op/go-logging" "github.com/spf13/viper" "google.golang.org/grpc" @@ -67,12 +68,12 @@ func main() { // cache configure ca.CacheConfiguration() - logger.Infof("CA Server (" + viper.GetString("server.version") + ")") + logger.Infof("CA Server (" + metadata.Version + ")") aca := ca.NewACA() defer aca.Stop() - eca := ca.NewECA() + eca := ca.NewECA(aca) defer eca.Stop() tca := ca.NewTCA(eca) diff --git a/metadata/metadata.go b/metadata/metadata.go new file mode 100644 index 00000000000..1af5c06dec1 --- /dev/null +++ b/metadata/metadata.go @@ -0,0 +1,19 @@ +/* +Copyright London Stock Exchange 2016 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadata + +var Version string // defined by the Makefile and passed in with ldflags diff --git a/mkdocs.yml b/mkdocs.yml index b81da566fdc..a9586af14fa 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -3,12 +3,15 @@ site_url: http://hyperledger-fabric.readthedocs.io theme: readthedocs repo_url: http://gerrit.hyperledger.org/r/fabric site_description: 'Welcome to the Hyperledger fabric documentation' +strict: true +theme_dir: 'docs/custom_theme' pages: - Home: index.md - Glossary: glossary.md - Protocol Spec: protocol-spec.md - Usecases: biz/usecases.md +- System Chaincode: SystemChaincode-noop.md - Installation and setup: - Chaincode or Application Developer Setup: Setup/Chaincode-setup.md @@ -22,9 +25,9 @@ pages: - Chaincode APIs: API/ChaincodeAPI.md - Core API: API/CoreAPI.md - CA API: API/MemberServicesAPI.md - - System Chaincode: SystemChaincodes/noop.md - Fabric Developer: + - v1.0 Preview: abstract_v1.md - Contributing: CONTRIBUTING.md - Getting an Account: Gerrit/lf-account.md - Gerrit: Gerrit/gerrit.md @@ -34,7 +37,7 @@ pages: - Maintainers: MAINTAINERS.md - Reviewing: Gerrit/reviewing.md - Changes: Gerrit/changes.md - - Style guides: +- Style guides: - Golang: Style-guides/go-style.md - FAQ: @@ -49,6 +52,19 @@ pages: - Attributes: tech/attributes.md - Best Practices: tech/best-practices.md +- NodeSDK: + - App-developer-env-setup: nodeSDK/app-developer-env-setup.md + - App-Overview: nodeSDK/app-overview.md + - Node-SDK-guide: nodeSDK/node-sdk-guide.md + - Node-SDK-indepth: nodeSDK/node-sdk-indepth.md + - Sample-Standalone-app: nodeSDK/sample-standalone-app.md + - Sample-web-app: nodeSDK/sample-web-app.md + +- Starter Kit: + - Starter Kit: starter/fabric-starter-kit.md + +- Releases: releases.md + markdown_extensions: - extra - tables diff --git a/orderer/atomicbroadcast/ab.pb.go b/orderer/atomicbroadcast/ab.pb.go index 3ef247a84e8..29df299ddc5 100644 --- a/orderer/atomicbroadcast/ab.pb.go +++ b/orderer/atomicbroadcast/ab.pb.go @@ -33,6 +33,12 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + // These status codes are intended to resemble selected HTTP status codes type Status int32 @@ -62,6 +68,7 @@ var Status_value = map[string]int32{ func (x Status) String() string { return proto.EnumName(Status_name, int32(x)) } +func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // Start may be specified to a specific block number, or may be request from the newest or oldest available // The start location is always inclusive, so the first reply from NEWEST will contain the newest block at the time @@ -89,40 +96,45 @@ var SeekInfo_StartType_value = map[string]int32{ func (x SeekInfo_StartType) String() string { return proto.EnumName(SeekInfo_StartType_name, int32(x)) } +func (SeekInfo_StartType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } type BroadcastResponse struct { - Status Status `protobuf:"varint,1,opt,name=Status,enum=atomicbroadcast.Status" json:"Status,omitempty"` + Status Status `protobuf:"varint,1,opt,name=Status,json=status,enum=atomicbroadcast.Status" json:"Status,omitempty"` } -func (m *BroadcastResponse) Reset() { *m = BroadcastResponse{} } -func (m *BroadcastResponse) String() string { return proto.CompactTextString(m) } -func (*BroadcastResponse) ProtoMessage() {} +func (m *BroadcastResponse) Reset() { *m = BroadcastResponse{} } +func (m *BroadcastResponse) String() string { return proto.CompactTextString(m) } +func (*BroadcastResponse) ProtoMessage() {} +func (*BroadcastResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } type BroadcastMessage struct { - Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` + Data []byte `protobuf:"bytes,1,opt,name=Data,json=data,proto3" json:"Data,omitempty"` } -func (m *BroadcastMessage) Reset() { *m = BroadcastMessage{} } -func (m *BroadcastMessage) String() string { return proto.CompactTextString(m) } -func (*BroadcastMessage) ProtoMessage() {} +func (m *BroadcastMessage) Reset() { *m = BroadcastMessage{} } +func (m *BroadcastMessage) String() string { return proto.CompactTextString(m) } +func (*BroadcastMessage) ProtoMessage() {} +func (*BroadcastMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } type SeekInfo struct { - Start SeekInfo_StartType `protobuf:"varint,1,opt,name=Start,enum=atomicbroadcast.SeekInfo_StartType" json:"Start,omitempty"` - SpecifiedNumber uint64 `protobuf:"varint,2,opt,name=SpecifiedNumber" json:"SpecifiedNumber,omitempty"` - WindowSize uint64 `protobuf:"varint,3,opt,name=WindowSize" json:"WindowSize,omitempty"` + Start SeekInfo_StartType `protobuf:"varint,1,opt,name=Start,json=start,enum=atomicbroadcast.SeekInfo_StartType" json:"Start,omitempty"` + SpecifiedNumber uint64 `protobuf:"varint,2,opt,name=SpecifiedNumber,json=specifiedNumber" json:"SpecifiedNumber,omitempty"` + WindowSize uint64 `protobuf:"varint,3,opt,name=WindowSize,json=windowSize" json:"WindowSize,omitempty"` } -func (m *SeekInfo) Reset() { *m = SeekInfo{} } -func (m *SeekInfo) String() string { return proto.CompactTextString(m) } -func (*SeekInfo) ProtoMessage() {} +func (m *SeekInfo) Reset() { *m = SeekInfo{} } +func (m *SeekInfo) String() string { return proto.CompactTextString(m) } +func (*SeekInfo) ProtoMessage() {} +func (*SeekInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } type Acknowledgement struct { - Number uint64 `protobuf:"varint,1,opt,name=Number" json:"Number,omitempty"` + Number uint64 
`protobuf:"varint,1,opt,name=Number,json=number" json:"Number,omitempty"` } -func (m *Acknowledgement) Reset() { *m = Acknowledgement{} } -func (m *Acknowledgement) String() string { return proto.CompactTextString(m) } -func (*Acknowledgement) ProtoMessage() {} +func (m *Acknowledgement) Reset() { *m = Acknowledgement{} } +func (m *Acknowledgement) String() string { return proto.CompactTextString(m) } +func (*Acknowledgement) ProtoMessage() {} +func (*Acknowledgement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } // The update message either causes a seek to a new stream start with a new window, or acknowledges a received block and advances the base of the window type DeliverUpdate struct { @@ -132,19 +144,20 @@ type DeliverUpdate struct { Type isDeliverUpdate_Type `protobuf_oneof:"Type"` } -func (m *DeliverUpdate) Reset() { *m = DeliverUpdate{} } -func (m *DeliverUpdate) String() string { return proto.CompactTextString(m) } -func (*DeliverUpdate) ProtoMessage() {} +func (m *DeliverUpdate) Reset() { *m = DeliverUpdate{} } +func (m *DeliverUpdate) String() string { return proto.CompactTextString(m) } +func (*DeliverUpdate) ProtoMessage() {} +func (*DeliverUpdate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } type isDeliverUpdate_Type interface { isDeliverUpdate_Type() } type DeliverUpdate_Acknowledgement struct { - Acknowledgement *Acknowledgement `protobuf:"bytes,1,opt,name=Acknowledgement,oneof"` + Acknowledgement *Acknowledgement `protobuf:"bytes,1,opt,name=Acknowledgement,json=acknowledgement,oneof"` } type DeliverUpdate_Seek struct { - Seek *SeekInfo `protobuf:"bytes,2,opt,name=Seek,oneof"` + Seek *SeekInfo `protobuf:"bytes,2,opt,name=Seek,json=seek,oneof"` } func (*DeliverUpdate_Acknowledgement) isDeliverUpdate_Type() {} @@ -172,8 +185,8 @@ func (m *DeliverUpdate) GetSeek() *SeekInfo { } // XXX_OneofFuncs is for the internal use of the proto package. -func (*DeliverUpdate) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _DeliverUpdate_OneofMarshaler, _DeliverUpdate_OneofUnmarshaler, []interface{}{ +func (*DeliverUpdate) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DeliverUpdate_OneofMarshaler, _DeliverUpdate_OneofUnmarshaler, _DeliverUpdate_OneofSizer, []interface{}{ (*DeliverUpdate_Acknowledgement)(nil), (*DeliverUpdate_Seek)(nil), } @@ -224,20 +237,42 @@ func _DeliverUpdate_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto. 
} } +func _DeliverUpdate_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DeliverUpdate) + // Type + switch x := m.Type.(type) { + case *DeliverUpdate_Acknowledgement: + s := proto.Size(x.Acknowledgement) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *DeliverUpdate_Seek: + s := proto.Size(x.Seek) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + // This is a temporary data structure, meant to hold the place of the finalized block structure // This must be a 'block' structure and not a 'batch' structure, although the terminology is slightly confusing // The requirement is to allow for a consumer of the orderer to declare the unvalidated blockchain as the definitive // blockchain, without breaking the hash chain or existing proof type Block struct { - Number uint64 `protobuf:"varint,2,opt,name=Number" json:"Number,omitempty"` - PrevHash []byte `protobuf:"bytes,3,opt,name=PrevHash,proto3" json:"PrevHash,omitempty"` - Proof []byte `protobuf:"bytes,4,opt,name=Proof,proto3" json:"Proof,omitempty"` - Messages []*BroadcastMessage `protobuf:"bytes,5,rep,name=Messages" json:"Messages,omitempty"` + Number uint64 `protobuf:"varint,2,opt,name=Number,json=number" json:"Number,omitempty"` + PrevHash []byte `protobuf:"bytes,3,opt,name=PrevHash,json=prevHash,proto3" json:"PrevHash,omitempty"` + Proof []byte `protobuf:"bytes,4,opt,name=Proof,json=proof,proto3" json:"Proof,omitempty"` + Messages []*BroadcastMessage `protobuf:"bytes,5,rep,name=Messages,json=messages" json:"Messages,omitempty"` } -func (m *Block) Reset() { *m = Block{} } -func (m *Block) String() string { return proto.CompactTextString(m) } -func (*Block) ProtoMessage() {} +func (m *Block) Reset() { *m = Block{} } +func (m *Block) String() string { return proto.CompactTextString(m) } +func (*Block) ProtoMessage() {} +func (*Block) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *Block) GetMessages() []*BroadcastMessage { if m != nil { @@ -253,19 +288,20 @@ type DeliverResponse struct { Type isDeliverResponse_Type `protobuf_oneof:"Type"` } -func (m *DeliverResponse) Reset() { *m = DeliverResponse{} } -func (m *DeliverResponse) String() string { return proto.CompactTextString(m) } -func (*DeliverResponse) ProtoMessage() {} +func (m *DeliverResponse) Reset() { *m = DeliverResponse{} } +func (m *DeliverResponse) String() string { return proto.CompactTextString(m) } +func (*DeliverResponse) ProtoMessage() {} +func (*DeliverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } type isDeliverResponse_Type interface { isDeliverResponse_Type() } type DeliverResponse_Error struct { - Error Status `protobuf:"varint,1,opt,name=Error,enum=atomicbroadcast.Status,oneof"` + Error Status `protobuf:"varint,1,opt,name=Error,json=error,enum=atomicbroadcast.Status,oneof"` } type DeliverResponse_Block struct { - Block *Block `protobuf:"bytes,2,opt,name=Block,oneof"` + Block *Block `protobuf:"bytes,2,opt,name=Block,json=block,oneof"` } func (*DeliverResponse_Error) isDeliverResponse_Type() {} @@ -293,8 +329,8 @@ func (m *DeliverResponse) GetBlock() *Block { } // XXX_OneofFuncs is for the internal use of the proto package. 
-func (*DeliverResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _DeliverResponse_OneofMarshaler, _DeliverResponse_OneofUnmarshaler, []interface{}{ +func (*DeliverResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DeliverResponse_OneofMarshaler, _DeliverResponse_OneofUnmarshaler, _DeliverResponse_OneofSizer, []interface{}{ (*DeliverResponse_Error)(nil), (*DeliverResponse_Block)(nil), } @@ -342,7 +378,33 @@ func _DeliverResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *prot } } +func _DeliverResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DeliverResponse) + // Type + switch x := m.Type.(type) { + case *DeliverResponse_Error: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Error)) + case *DeliverResponse_Block: + s := proto.Size(x.Block) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + func init() { + proto.RegisterType((*BroadcastResponse)(nil), "atomicbroadcast.BroadcastResponse") + proto.RegisterType((*BroadcastMessage)(nil), "atomicbroadcast.BroadcastMessage") + proto.RegisterType((*SeekInfo)(nil), "atomicbroadcast.SeekInfo") + proto.RegisterType((*Acknowledgement)(nil), "atomicbroadcast.Acknowledgement") + proto.RegisterType((*DeliverUpdate)(nil), "atomicbroadcast.DeliverUpdate") + proto.RegisterType((*Block)(nil), "atomicbroadcast.Block") + proto.RegisterType((*DeliverResponse)(nil), "atomicbroadcast.DeliverResponse") proto.RegisterEnum("atomicbroadcast.Status", Status_name, Status_value) proto.RegisterEnum("atomicbroadcast.SeekInfo_StartType", SeekInfo_StartType_name, SeekInfo_StartType_value) } @@ -351,6 +413,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion3 + // Client API for AtomicBroadcast service type AtomicBroadcastClient interface { @@ -517,4 +583,47 @@ var _AtomicBroadcast_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("ab.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 576 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x54, 0x4d, 0x6f, 0xda, 0x40, + 0x10, 0x8d, 0x83, 0xed, 0x38, 0x43, 0x12, 0xbb, 0xdb, 0x2a, 0x75, 0x73, 0x88, 0xa8, 0x2b, 0x55, + 0x69, 0x0f, 0xa4, 0xa2, 0xa7, 0x1e, 0x7a, 0xc0, 0xd8, 0x28, 0x96, 0xa8, 0x21, 0x36, 0x24, 0xbd, + 0x21, 0x03, 0x4b, 0x6a, 0x01, 0x5e, 0xcb, 0xeb, 0x04, 0x35, 0xbf, 0x22, 0x52, 0xab, 0xfe, 0x9c, + 0x5e, 0xfa, 0xa3, 0x7a, 0xed, 0x7a, 0x63, 0x28, 0x60, 0xa1, 0x9c, 0x98, 0xf1, 0xbc, 0x37, 0x1f, + 0x6f, 0x9f, 0x00, 0x25, 0x18, 0x54, 0xe3, 0x84, 0xa4, 0x04, 0xa9, 0x41, 0x4a, 0x66, 0xe1, 0x70, + 0x90, 0x90, 0x60, 0x34, 0x0c, 0x68, 0x6a, 0x58, 0xf0, 0xcc, 0x5c, 0x24, 0x1e, 0xa6, 0x31, 0x89, + 0x28, 0x46, 0xe7, 0x20, 0xfb, 0x69, 0x90, 0xde, 0x52, 0x5d, 0xa8, 0x08, 0x67, 0x47, 0xb5, 0x97, + 0xd5, 0x0d, 0x5a, 0xf5, 0xb1, 0xec, 0xc9, 0x94, 0xff, 0x1a, 0x6f, 0x41, 0x5b, 0x76, 0xf9, 0x82, + 0x29, 0x0d, 0x6e, 0x30, 0x42, 0x20, 0x5a, 0x41, 0x1a, 0xf0, 0x16, 0x07, 0x9e, 0x38, 0x62, 0xb1, + 0xf1, 0x47, 0x00, 0xc5, 0xc7, 0x78, 0xe2, 0x44, 0x63, 0x82, 0x3e, 0x81, 0xc4, 0xda, 0x24, 0x69, + 0x3e, 0xe4, 0x4d, 0x71, 0x48, 0x8e, 0xac, 0x72, 0x58, 0xf7, 0x7b, 0x8c, 0x3d, 0x89, 0x66, 0x21, + 0x3a, 0x03, 0xd5, 0x8f, 0xf1, 0x30, 0x1c, 0x87, 0x78, 0xe4, 0xde, 0xce, 0x06, 0x38, 0xd1, 0x77, + 0x59, 0x13, 0xd1, 0x53, 0xe9, 0xfa, 0x67, 0x74, 0x0a, 0x70, 0x1d, 0x46, 0x23, 0x32, 0xf7, 0xc3, + 0x7b, 0xac, 0x97, 0x38, 0x08, 0xe6, 0xcb, 0x2f, 0x46, 0x0d, 0xf6, 0x97, 0xdd, 0x11, 0x80, 0xec, + 0xda, 0xd7, 0xb6, 0xdf, 0xd5, 0x76, 0xb2, 0xb8, 0xdd, 0xb2, 0xb2, 0x58, 0x40, 0x87, 0x0c, 0xd4, + 0xb1, 0x1b, 0x4e, 0xd3, 0xb1, 0x2d, 0x6d, 0xd7, 0x78, 0x07, 0x6a, 0x7d, 0x38, 0x89, 0xc8, 0x7c, + 0x8a, 0x47, 0x37, 0x78, 0x86, 0xa3, 0x14, 0x1d, 0x33, 0xe6, 0xe3, 0x1e, 0x02, 0x1f, 0x21, 0x47, + 0x3c, 0x33, 0x7e, 0x09, 0x70, 0x68, 0xe1, 0x69, 0x78, 0x87, 0x93, 0x5e, 0xcc, 0x24, 0xc0, 0xa8, + 0x55, 0x20, 0x73, 0x4a, 0xb9, 0x56, 0x29, 0xdc, 0xbf, 0x81, 0xbb, 0xd8, 0xf1, 0xd4, 0x60, 0x63, + 0xee, 0x39, 0x88, 0x99, 0x4a, 0xfc, 0xfa, 0x72, 0xed, 0xd5, 0x56, 0x09, 0x19, 0x57, 0xa4, 0x2c, + 0x36, 0x65, 0x10, 0xb3, 0x53, 0x8d, 0x07, 0x01, 0x24, 0x73, 0x4a, 0x86, 0x93, 0x95, 0xd5, 0x77, + 0x57, 0x57, 0x47, 0x27, 0xa0, 0x74, 0x12, 0x7c, 0x77, 0x11, 0xd0, 0x6f, 0x5c, 0xb7, 0x03, 0x4f, + 0x89, 0xf3, 0x1c, 0xbd, 0x00, 0xa9, 0x93, 0x10, 0x32, 0xd6, 0x45, 0x5e, 0x90, 0xe2, 0x2c, 0x41, + 0x9f, 0x41, 0xc9, 0x1f, 0x9f, 0xea, 0x52, 0xa5, 0xc4, 0x16, 0x7a, 0x5d, 0x58, 0x68, 0xd3, 0x26, + 0x9e, 0x32, 0xcb, 0x29, 0xc6, 0x3d, 0xa8, 0xb9, 0x54, 0x2b, 0x46, 0x94, 0xec, 0x24, 0x21, 0xc9, + 0x13, 0x3e, 0x64, 0xd7, 0x49, 0x38, 0xc3, 0xa1, 0x6a, 0x7e, 0x55, 0x2e, 0xc8, 0x71, 0x71, 0x7e, + 0x56, 0xcd, 0xf0, 0x83, 0x2c, 0x58, 0xc8, 0xf1, 0x3e, 0x58, 0x38, 0x1e, 0x95, 0x61, 0xcf, 0xef, + 0x35, 0x1a, 0xb6, 0xef, 0x33, 0x13, 0x68, 0x50, 0x36, 0xeb, 0x56, 0xdf, 0xb3, 0x2f, 0x7b, 0x99, + 0x13, 0x1e, 0x4a, 0xe8, 0x08, 0xf6, 0x9b, 0x6d, 0xcf, 0x74, 0x2c, 0xcb, 0x76, 0xb5, 0x1f, 0x3c, + 0x77, 0xdb, 0xdd, 0x7e, 0xb3, 0xdd, 0x73, 0x2d, 0xed, 0x67, 0x09, 0xe9, 0xf0, 0xdc, 0xb7, 0xbd, + 0x2b, 0xa7, 0x61, 0xf7, 0x7b, 0x6e, 0xfd, 0xaa, 0xee, 0xb4, 0xea, 0x66, 0xcb, 0xd6, 0xfe, 0x96, + 0x6a, 0xbf, 
0x05, 0xf6, 0xf2, 0x7c, 0x9b, 0xa5, 0x06, 0xe8, 0x2b, 0xec, 0xff, 0x4f, 0x9e, 0x16, + 0xeb, 0xc4, 0xd8, 0x0e, 0x59, 0x68, 0x66, 0xec, 0x9c, 0x09, 0x1f, 0x04, 0x74, 0x09, 0x7b, 0xb9, + 0x98, 0xe8, 0xb4, 0x40, 0x5a, 0x73, 0xe4, 0x49, 0x65, 0x5b, 0x7d, 0xbd, 0xe5, 0x40, 0xe6, 0x7f, + 0x21, 0x1f, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xb0, 0xf0, 0x26, 0x9a, 0x4e, 0x04, 0x00, 0x00, } diff --git a/peer/chaincode/common.go b/peer/chaincode/common.go index 565f08e16f1..233b43b5447 100755 --- a/peer/chaincode/common.go +++ b/peer/chaincode/common.go @@ -35,10 +35,6 @@ import ( "golang.org/x/net/context" ) -type container struct { - Args []string -} - //getProposal gets the proposal for the chaincode invocation //Currently supported only for Invokes (Queries still go through devops client) func getProposal(cis *pb.ChaincodeInvocationSpec) (*pb.Proposal, error) { @@ -74,11 +70,10 @@ func getChaincodeSpecification(cmd *cobra.Command) (*pb.ChaincodeSpec, error) { } // Build the spec - inputc := container{} - if err := json.Unmarshal([]byte(chaincodeCtorJSON), &inputc); err != nil { + input := &pb.ChaincodeInput{} + if err := json.Unmarshal([]byte(chaincodeCtorJSON), &input); err != nil { return spec, fmt.Errorf("Chaincode argument error: %s", err) } - input := &pb.ChaincodeInput{Args: u.ToChaincodeArgs(inputc.Args...)} var attributes []string if err := json.Unmarshal([]byte(chaincodeAttributesJSON), &attributes); err != nil { @@ -123,7 +118,7 @@ func getChaincodeSpecification(cmd *cobra.Command) (*pb.ChaincodeSpec, error) { } else { // Check if the token is not there and fail if os.IsNotExist(err) { - return spec, fmt.Errorf("User '%s' not logged in. Use the 'login' command to obtain a security token.", chaincodeUsr) + return spec, fmt.Errorf("User '%s' not logged in. Use the 'peer network login' command to obtain a security token.", chaincodeUsr) } // Unexpected error panic(fmt.Errorf("Fatal error when checking for client login token: %s\n", err)) @@ -241,18 +236,17 @@ func checkChaincodeCmdParams(cmd *cobra.Command) error { return fmt.Errorf("Chaincode argument error: %s", err) } m := f.(map[string]interface{}) - if len(m) != 1 { - return fmt.Errorf("Non-empty JSON chaincode parameters must contain exactly 1 key: 'Args'") - } + sm := make(map[string]interface{}) for k := range m { - switch strings.ToLower(k) { - case "args": - default: - return fmt.Errorf("Illegal chaincode key '%s' - must be only 'Args'", k) - } + sm[strings.ToLower(k)] = m[k] + } + _, argsPresent := sm["args"] + _, funcPresent := sm["function"] + if !argsPresent || (len(m) == 2 && !funcPresent) || len(m) > 2 { + return fmt.Errorf("Non-empty JSON chaincode parameters must contain the following keys: 'Args' or 'Function' and 'Args'") } } else { - return errors.New("Empty JSON chaincode parameters must contain exactly 1 key: 'Args'") + return errors.New("Empty JSON chaincode parameters must contain the following keys: 'Args' or 'Function' and 'Args'") } if chaincodeAttributesJSON != "[]" { diff --git a/peer/chaincode/common_test.go b/peer/chaincode/common_test.go new file mode 100644 index 00000000000..2ba6fc592f1 --- /dev/null +++ b/peer/chaincode/common_test.go @@ -0,0 +1,63 @@ +/* + Copyright Digital Asset Holdings, LLC 2016 All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package chaincode + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCheckChaincodeCmdParamsWithNewCallingSchema(t *testing.T) { + chaincodeAttributesJSON = "[]" + chaincodeCtorJSON = `{ "Args":["func", "param"] }` + chaincodePath = "some/path" + require := require.New(t) + result := checkChaincodeCmdParams(nil) + + require.Nil(result) +} + +func TestCheckChaincodeCmdParamsWithOldCallingSchema(t *testing.T) { + chaincodeAttributesJSON = "[]" + chaincodeCtorJSON = `{ "Function":"func", "Args":["param"] }` + chaincodePath = "some/path" + require := require.New(t) + result := checkChaincodeCmdParams(nil) + + require.Nil(result) +} + +func TestCheckChaincodeCmdParamsWithFunctionOnly(t *testing.T) { + chaincodeAttributesJSON = "[]" + chaincodeCtorJSON = `{ "Function":"func" }` + chaincodePath = "some/path" + require := require.New(t) + result := checkChaincodeCmdParams(nil) + + require.Error(result) +} + +func TestCheckChaincodeCmdParamsEmptyCtor(t *testing.T) { + chaincodeAttributesJSON = "[]" + chaincodeCtorJSON = `{}` + chaincodePath = "some/path" + require := require.New(t) + result := checkChaincodeCmdParams(nil) + + require.Error(result) +} diff --git a/peer/core.yaml b/peer/core.yaml index 823222e5bfb..0642ddb4f4c 100644 --- a/peer/core.yaml +++ b/peer/core.yaml @@ -76,10 +76,6 @@ logging: ############################################################################### peer: - # Peer Version following version semantics as described here http://semver.org/ - # The Peer supplies this version in communications with other Peers - version: 0.1.0 - # The Peer id is used for identifying this Peer instance. id: jdoe @@ -115,9 +111,13 @@ peer: snapshot: # Channel size for readonly syncStateSnapshot messages channel # for receiving state deltas for snapshot from oppositie Peer Endpoints. - # NOTE: currently messages are not stored and forwarded, but - # rather lost if the channel write blocks. + # NOTE: when the channel is exhausted, the writes block for up to the + # writeTimeout specified below channelSize: 50 + # Write timeout for the syncStateSnapshot messages + # When the channel above is exhausted, messages block before being + # discarded for this amount of time + writeTimeout: 60s deltas: # Channel size for readonly syncStateDeltas messages channel for # receiving state deltas for a syncBlockRange from oppositie @@ -296,24 +296,24 @@ chaincode: # This is the basis for the Golang Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - from hyperledger/fabric-baseimage - #from utxo:0.1.0 - COPY src $GOPATH/src - WORKDIR $GOPATH + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + COPY src $GOPATH/src + WORKDIR $GOPATH car: # This is the basis for the CAR Dockerfile. Additional commands will # be appended depedendent upon the chaincode specification. Dockerfile: | - FROM hyperledger/fabric-ccenv + FROM hyperledger/fabric-ccenv:$(ARCH)-$(PROJECT_VERSION) + java: # This is an image based on java:openjdk-8 with addition compiler # tools added for java shim layer packaging. 
# This image is packed with shim layer libraries that are necessary # for Java chaincode runtime. Dockerfile: | - from hyperledger/fabric-javaenv + from hyperledger/fabric-javaenv:$(ARCH)-$(PROJECT_VERSION) # timeout in millisecs for starting up a container and waiting for Register # to come through. 1sec should be plenty for chaincode unit tests @@ -433,8 +433,12 @@ security: # security to be enabled). attributes: enabled: false + + # TCerts pool configuration. Multi-thread pool can also be configured + # by multichannel option switching concurrency in communication with TCA. multithreading: enabled: false + multichannel: false # Confidentiality protocol versions supported: 1.2 confidentialityProtocolVersion: 1.2 diff --git a/peer/network/list.go b/peer/network/list.go index 40da5ff471a..b62e8eb9a76 100644 --- a/peer/network/list.go +++ b/peer/network/list.go @@ -19,8 +19,8 @@ package network import ( "encoding/json" "fmt" - "google/protobuf" + "github.com/golang/protobuf/ptypes/empty" "github.com/hyperledger/fabric/core/peer" pb "github.com/hyperledger/fabric/protos" "github.com/spf13/cobra" @@ -51,7 +51,7 @@ func networkList() (err error) { return } openchainClient := pb.NewOpenchainClient(clientConn) - peers, err := openchainClient.GetPeers(context.Background(), &google_protobuf.Empty{}) + peers, err := openchainClient.GetPeers(context.Background(), &empty.Empty{}) if err != nil { err = fmt.Errorf("Error trying to get peers: %s", err) diff --git a/peer/network/login.go b/peer/network/login.go index 4430f98ec5e..4e6aa1d843a 100644 --- a/peer/network/login.go +++ b/peer/network/login.go @@ -42,7 +42,7 @@ func loginCmd() *cobra.Command { } var networkLoginCmd = &cobra.Command{ - Use: "login", + Use: "login ", Short: "Logs in user to CLI.", Long: `Logs in the local user to CLI. 
Must supply username as a parameter.`, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/peer/node/status.go b/peer/node/status.go index b58d7e455f2..a060855fc0d 100755 --- a/peer/node/status.go +++ b/peer/node/status.go @@ -18,8 +18,8 @@ package node import ( "fmt" - "google/protobuf" + "github.com/golang/protobuf/ptypes/empty" "github.com/hyperledger/fabric/core/peer" pb "github.com/hyperledger/fabric/protos" "github.com/spf13/cobra" @@ -50,7 +50,7 @@ func status() (err error) { serverClient := pb.NewAdminClient(clientConn) - status, err := serverClient.GetStatus(context.Background(), &google_protobuf.Empty{}) + status, err := serverClient.GetStatus(context.Background(), &empty.Empty{}) if err != nil { logger.Infof("Error trying to get status from local peer: %s", err) err = fmt.Errorf("Error trying to connect to local peer: %s", err) diff --git a/peer/node/stop.go b/peer/node/stop.go index 080bdd5c55b..b0412888f96 100755 --- a/peer/node/stop.go +++ b/peer/node/stop.go @@ -19,12 +19,12 @@ package node import ( "bytes" "fmt" - "google/protobuf" "io/ioutil" "os" "strconv" "syscall" + "github.com/golang/protobuf/ptypes/empty" "github.com/hyperledger/fabric/core/db" "github.com/hyperledger/fabric/core/peer" pb "github.com/hyperledger/fabric/protos" @@ -72,7 +72,7 @@ func stop() (err error) { logger.Info("Stopping peer using grpc") serverClient := pb.NewAdminClient(clientConn) - status, err := serverClient.StopServer(context.Background(), &google_protobuf.Empty{}) + status, err := serverClient.StopServer(context.Background(), &empty.Empty{}) db.Stop() if err != nil { fmt.Println(&pb.ServerStatus{Status: pb.ServerStatus_STOPPED}) diff --git a/peer/version/version.go b/peer/version/version.go index 5c3190ec892..4eaca78da4f 100755 --- a/peer/version/version.go +++ b/peer/version/version.go @@ -19,8 +19,8 @@ package version import ( "fmt" + "github.com/hyperledger/fabric/metadata" "github.com/spf13/cobra" - "github.com/spf13/viper" ) // Cmd returns the Cobra Command for Version @@ -39,6 +39,5 @@ var cobraCommand = &cobra.Command{ // Print outputs the current executable version to stdout func Print() { - version := viper.GetString("peer.version") - fmt.Printf("Fabric peer server version %s\n", version) + fmt.Printf("Fabric peer server version %s\n", metadata.Version) } diff --git a/proposals/r1/Custom-Events-High-level-specification.md b/proposals/r1/Custom-Events-High-level-specification.md new file mode 100644 index 00000000000..5e5bbd6ddd4 --- /dev/null +++ b/proposals/r1/Custom-Events-High-level-specification.md @@ -0,0 +1,98 @@ + +@muralisrini and @pmullaney + +## Introduction + +The events framework supports the ability to emit 2 types of audible events, block and custom/chaincode events (of type ChaincodeEvent defined in events.proto). The basic idea is that clients (event consumers) will register for event types (currently "block" or "chaincode") and in the case of chaincode they may specify additional registration criteria namely chaincodeID and eventname. ChaincodeID indentifies the specific chaincode deployment that the client has interest in seeing events from and eventname is a string that the chaincode developer embeds when invoking the chaincode stub's `SetEvent` API. Invoke transactions are currently the only operations that can emit events and each invoke is limited to one event emitted per transaction. It should be noted that there are no “transient” or “ephemeral” events that are not stored on the ledger. 
Thus, events are as deterministic as any other transactional data on the blockchain. This basic infrastructure can be extended for successful + +## Tactical design: Use TransactionResult to store events + +Add an Event message to TransactionResult + +``` +// chaincodeEvent - any event emitted by a transaction +type TransactionResult struct { + Uuid string + Result []byte + ErrorCode uint32 + Error string + ChaincodeEvent *ChaincodeEvent +} +``` +Where **ChaincodeEvent** is + +``` +type ChaincodeEvent struct { + ChaincodeID string + TxID string + EventName string + Payload []byte +} +``` + +Where ChaincodeID is the uuid associated with the chaincode, TxID is the transaction that generated the event, EventName is the name given to the event by the chaincode (see `SetEvent`), and Payload is the payload attached to the event by the chaincode. The fabric does not impose a structure or restriction on the Payload, although it is good practice not to make it large in size. + +The SetEvent API on the chaincode shim: + +``` +func (stub *ChaincodeStub) GetState(key string) +func (stub *ChaincodeStub) PutState(key string, value []byte) +func (stub *ChaincodeStub) DelState(key string) +… +… +func (stub *ChaincodeStub) SetEvent(name string, payload []byte) +… +… +``` + +When the transaction is completed, SetEvent will result in the event being added to TransactionResult and committed to the ledger. The event will then be posted to all clients registered for that event by the event framework. + + +## General Event types and their relation to Chaincode Events + + +Events are associated with event types. Clients register interest in the event types they want to receive. +The life-cycle of an event type is illustrated by a “block” event: + + 1. On boot-up, the peer adds "block" to the supported event types + 2. Clients can register interest in the “block” event type with a peer (or multiple peers) + 3. On block creation, peers post an event to all registered clients + 4. Clients receive the “block” event and process the transactions in the block + +Chaincode events add an additional level of registration filtering. Instead of registering for all events of a given event type, chaincode events allow clients to register for a specific event from a specific chaincode. For this first version, for simplicity, we have not implemented wildcard or regex matching on the eventname, but that is planned. More information on this is given in "Client Interface" below. + +## Client Interface + +The current interface for clients to register for and then receive events is a gRPC interface with pb messages defined in protos/events.proto. The Register message is a series (`repeated`) of Interest messages. Each Interest represents a single event registration. + +``` +message Interest { + EventType eventType = 1; + //Ideally we should just have the following oneof for different + //Reg types and get rid of EventType. But this is an API change + //Additional Reg types may add messages specific to their type + //to the oneof.
+ oneof RegInfo { + ChaincodeReg chaincodeRegInfo = 2; + } +} + +//ChaincodeReg is used for registering chaincode Interests +//when EventType is CHAINCODE +message ChaincodeReg { + string chaincodeID = 1; + string eventName = 2; + ~~bool anyTransaction = 3;~~ //TO BE REMOVED +} + +//Register is sent by consumers for registering events +//string type - "register" +message Register { + repeated Interest events = 1; +} + +``` + +As mentioned in the previous section, clients should register for chaincode events using the `ChaincodeReg` message, where `chaincodeID` refers to the ID of the chaincode as returned by the deploy transaction and `eventName` refers to the name of the event posted by the chaincode. Setting `eventName` to the empty string (or "*") will cause all events from a chaincode to be sent to the listener. + +There is a service defined in events.proto with a single method that receives a stream of registration events and returns a stream that the client can use to read events from as they occur. diff --git a/proposals/r1/Next-Consensus-Architecture-Proposal.md b/proposals/r1/Next-Consensus-Architecture-Proposal.md new file mode 100644 index 00000000000..cb01e36f645 --- /dev/null +++ b/proposals/r1/Next-Consensus-Architecture-Proposal.md @@ -0,0 +1,756 @@ + +Authors: Elli Androulaki, Christian Cachin, Angelo De Caro, Konstantinos Christidis, Chet Murthy, Binh Nguyen, Alessandro Sorniotti, and Marko Vukolić + +This page documents the architecture of a blockchain infrastructure with the roles of a blockchain node separated into roles of *peers* (who maintain state/ledger) and *consenters* (who consent on the order of transactions included in the blockchain state). In common blockchain architectures (including Hyperledger fabric as of July 2016) these roles are unified (cf. *validating peer* in Hyperledger fabric). The architecture also introduces *endorsing peers* (endorsers), as a special type of peer responsible for simulating execution and *endorsing* transactions (roughly corresponding to executing/validating transactions in HL fabric 0.5-developer-preview). + +The architecture has the following advantages compared to the design in which peers/consenters/endorsers are unified. + +* **Chaincode trust flexibility.** The architecture separates *trust assumptions* for chaincodes (blockchain applications) from trust assumptions for consensus. In other words, the consensus service may be provided by one set of nodes (consenters) and tolerate some of them failing or misbehaving, and the endorsers may be different for each chaincode. + +* **Scalability.** As the endorser nodes responsible for a particular chaincode are orthogonal to the consenters, the system may *scale* better than if these functions were done by the same nodes. In particular, this results when different chaincodes specify disjoint endorsers, which introduces a partitioning of chaincodes between endorsers and allows parallel chaincode execution (endorsement). Besides, chaincode execution, which can potentially be costly, is removed from the critical path of the consensus service. + +* **Confidentiality.** The architecture facilitates deployment of chaincodes that have *confidentiality* requirements with respect to the content and state updates of their transactions. + +* **Consensus modularity.** The architecture is *modular* and allows pluggable consensus implementations. + + +## Table of contents + +1. System architecture +1. Basic workflow of transaction endorsement +1. Endorsement policies +1. Blockchain data structures +1.
State transfer and checkpointing +1. Confidentiality + +--- + +## 1. System architecture + +The blockchain is a distributed system consisting of many nodes that communicate with each other. The blockchain runs programs called chaincode, holds state and ledger data, and executes transactions. The chaincode is the central element: transactions are operations invoked on the chaincode and only chaincode changes the state. Transactions have to be "endorsed" and only endorsed transactions are committed and have an effect on the state. There may exist one or more special chaincodes for management functions and parameters, collectively called *system chaincodes*. + + +### 1.1. Transactions + +Transactions may be of two types: + +* *Deploy transactions* create new chaincode and take a program as a parameter. When a deploy transaction executes successfully, the chaincode has been installed "on" the blockchain. + +* *Invoke transactions* perform an operation in the context of a previously deployed chaincode. An invoke transaction refers to a chaincode and to one of its provided functions. When successful, the chaincode executes the specified function - which may involve modifying the corresponding state and returning an output. + +As described later, deploy transactions are special cases of invoke transactions, where a deploy transaction that creates new chaincode corresponds to an invoke transaction on a system chaincode. + +**Remark:** *This document currently assumes that a transaction either creates new chaincode or invokes an operation provided by _one_ already deployed chaincode. This document does not yet describe: a) support for cross-chaincode transactions, b) optimizations for query (read-only) transactions.* + +### 1.2. State + +**Blockchain state.** The state of the blockchain ("world state") has a simple structure and is modeled as a versioned key/value store (KVS), where keys are names and values are arbitrary blobs. These entries are manipulated by the chaincodes (applications) running on the blockchain through `put` and `get` KVS-operations. The state is stored persistently and updates to the state are logged. Notice that while a versioned KVS is adopted as the state model, an implementation may use an actual KVS, but also an RDBMS or any other solution. + +More formally, blockchain state `s` is modeled as an element of a mapping `K -> (V X N)`, where: + +* `K` is a set of keys +* `V` is a set of values +* `N` is an infinite ordered set of version numbers. The injective function `next: N -> N` takes an element of `N` and returns the next version number. + +Both `V` and `N` contain a special element `\bot`, which is in the case of `N` the lowest element. Initially all keys are mapped to `(\bot,\bot)`. For `s(k)=(v,ver)` we denote `v` by `s(k).value`, and `ver` by `s(k).version`. + +KVS operations are modeled as follows: + +* `put(k,v)`, for `k\in K` and `v\in V`, takes the blockchain state `s` and changes it to `s'` such that `s'(k)=(v,next(s(k).version))` with `s'(k')=s(k')` for all `k'!=k`. +* `get(k)` returns `s(k)`. + +**State partitioning.** Keys in the KVS can be recognized from their name to belong to a particular chaincode, in the sense that only transactions of a certain chaincode may modify the keys belonging to this chaincode. In principle, any chaincode can read the keys belonging to other chaincodes (state of the confidential chaincodes cannot be read in the clear - see Section 6).
*Support for cross-chaincode transactions that modify state belonging to two or more chaincodes will be added in the future.* + +**Ledger.** Evolution of blockchain state (history) is kept in a *ledger*. The ledger is a hashchain of blocks of transactions. Transactions in the ledger are totally ordered. + +Blockchain state and ledger are further detailed in Section 4. + +### 1.3. Nodes + +Nodes are the communication entities of the blockchain. A "node" is only a logical function in the sense that multiple nodes of different types can run on the same physical server. What counts is how nodes are grouped in "trust domains" and associated with logical entities that control them. + +There are three types of nodes: + +1. **Client** or **submitting-client**: a client that submits an actual transaction-invocation. + +2. **Peer**: a node that commits transactions and maintains the state and a copy of the ledger. Besides, peers can have two special roles: + + a. A **submitting peer** or **submitter**, + + b. An **endorsing peer** or **endorser**. + + +3. **Consensus-service-node** or **consenter**: a node running the communication service that implements a delivery guarantee (such as atomic broadcast), typically by running consensus. + +Notice that consenters and clients do not maintain ledgers and blockchain state; only peers do. + +The types of nodes are explained next in more detail. + +#### 1.3.1. Client + +The client represents the entity that acts on behalf of an end-user. It must connect to a peer for communicating with the blockchain. The client may connect to any peer of its choice. Clients create and thereby invoke transactions. + +#### 1.3.2. Peer + +A peer communicates with the consensus service and maintains the blockchain state and the ledger. Such peers receive ordered state updates from the consensus service and apply them to the locally held state. + +Peers can additionally take up one of the two roles described next. + +* **Submitting peer.** A *submitting peer* is a special role of a peer that provides an interface to clients, such that a client may connect to a submitting peer for invoking transactions and obtaining results. The peer communicates with the other blockchain nodes on behalf of one or more clients for executing the transaction. + + +* **Endorsing peer.** The special function of an *endorsing peer* occurs with respect to a particular chaincode and consists of *endorsing* a transaction before it is committed. Every chaincode may specify an *endorsement policy* that may refer to a set of endorsing peers. The policy defines the necessary and sufficient conditions for a valid transaction endorsement (typically a set of endorsers' signatures), as described later in Sections 2 and 3. In the special case of deploy transactions that install new chaincode, the (deployment) endorsement policy is specified as an endorsement policy of the system chaincode. + +To emphasize that a peer does not also have the role of a submitting peer or an endorsing peer, such a peer is sometimes referred to as a *committing peer*. + + +#### 1.3.3. Consensus service nodes (Consenters) + +The *consenters* form the *consensus service*, i.e., a communication fabric that provides delivery guarantees. The consensus service can be implemented in different ways: ranging from a centralized service (used, e.g., in development and testing) to distributed protocols that target different network and node fault models.
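+ +To make this pluggability concrete, the following is a minimal Go sketch of the kind of contract a consensus-service implementation could present to peers; it is an illustrative assumption, not an interface defined by the fabric, and the broadcast/deliver operations it mirrors are specified in the next subsection. + +``` +// ConsensusService is a hypothetical, illustrative interface. +// Implementations may range from a single-process ordering service +// (for development and testing) to a BFT protocol run by many consenters. +type ConsensusService interface { +    // Broadcast submits an arbitrary message (blob) to the shared channel. +    Broadcast(blob []byte) error +    // Deliver registers a callback that is invoked for every ordered message, +    // together with its sequence number and the hash of the previous delivery. +    Deliver(handle func(seqno uint64, prevhash []byte, blob []byte)) error +} +```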
+ +Peers are clients of the consensus service, to which the consensus service provides a shared *communication channel* offering a broadcast service for messages containing transactions. Peers connect to the channel and may send and receive messages on the channel. The channel supports *atomic* delivery of all messages, that is, message communication with total-order delivery and (implementation specific) reliability. In other words, the channel outputs the same messages to all connected peers and outputs them to all peers in the same logical order. This atomic communication guarantee is also called *total-order broadcast*, *atomic broadcast*, or *consensus* in the context of distributed systems. The communicated messages are the candidate transactions for inclusion in the blockchain state. + + +**Partitioning (consensus channels).** Consensus service may support multiple *channels* similar to the *topics* of a publish/subscribe (pub/sub) messaging system. Clients can connects to a given channel and can then send messages and obtain the messages that arrive. Channels can be thought of as partitions - clients connecting to one channel are unaware of the existence of other channels, but clients may connect to multiple channels. For simplicity, in the rest of this document and unless explicitly mentioned otherwise, we assume consensus service consists of a single channel/topic. + + +**Consensus service API.** Peers connect to the channel provided by the consensus service, via the interface provided by the consensus service. The consensus service API consists of two basic operations (more generally *asynchronous events*): + +* `broadcast(blob)`: the submitting peer calls this to broadcast an arbitrary message `blob` for dissemination over the channel. This is also called `request(blob)` in the BFT context, when sending a request to a service. + +* `deliver(seqno, prevhash, blob)`: the consensus service calls this on the peer to deliver the message `blob` with the specified non-negative integer sequence number (`seqno`) and hash of the most recently delivered blob (`prevhash`). In other words, it is an output event from the consensus service. `deliver()` is also sometimes called `notify()` in pub-sub systems or `commit()` in BFT systems. + + + Notice that consensus service clients (i.e., peers) interact with the service only through `broadcast()` and `deliver()` events. + +**Consensus properties.** The guarantees of the consensus service (or atomic-broadcast channel) are as follows. They answer the following question: *What happens to a broadcasted message and what relations exist among delivered messages?* + +1. **Safety (consistency guarantees)**: As long as peers are connected for sufficiently long periods of time to the channel (they can disconnect or crash, but will restart and reconnect), they will see an *identical* series of delivered `(seqno, prevhash, blob)` messages. This means the outputs (`deliver()` events) occur in the *same order* on all peers and according to sequence number and carry *identical content* (`blob` and `prevhash`) for the same sequence number. Note this is only a *logical order*, and a `deliver(seqno, prevhash, blob)` on one peer is not required to occur in any real-time relation to `deliver(seqno, prevhash, blob)` that outputs the same message at another peer. Put differently, given a particular `seqno`, *no* two correct peers deliver *different* `prevhash` or `blob` values. 
Moreover, no value `blob` is delivered unless some consensus client (peer) actually called `broadcast(blob)` and, preferably, every broadcasted blob is only delivered *once*. + + Furthermore, the `deliver()` event contains the cryptographic hash of the previous `deliver()` event (`prevhash`). When the consensus service implements atomic broadcast guarantees, `prevhash` is the cryptographic hash of the parameters from the `deliver()` event with sequence number `seqno-1`. This establishes a hash chain across `deliver()` events, which is used to help verify the integrity of the consensus output, as discussed in Sections 4 and 5 later. In the special case of the first `deliver()` event, `prevhash` has a default value. + + +1. **Liveness (delivery guarantee)**: Liveness guarantees of the consensus service are specified by a consensus service implementation. The exact guarantees may depend on the network and node fault model. + + In principle, if the submitting peer does not fail, the consensus service should guarantee that every correct peer that connects to the consensus service eventually delivers every submitted transaction. + + +To summarize, the consensus service ensures the following properties: + +* *Agreement.* For any two events at correct peers `deliver(seqno, prevhash0, blob0)` and `deliver(seqno, prevhash1, blob1)` with the same `seqno`, `prevhash0==prevhash1` and `blob0==blob1`; +* *Hashchain integrity.* For any two events at correct peers `deliver(seqno-1, prevhash0, blob0)` and `deliver(seqno, prevhash, blob)`, `prevhash = HASH(seqno-1||prevhash0||blob0)`. +* *No skipping*. If a consensus service outputs `deliver(seqno, prevhash, blob)` at a correct peer *p*, such that `seqno>0`, then *p* already delivered an event `deliver(seqno-1, prevhash0, blob0)`. +* *No creation*. Any event `deliver(seqno, prevhash, blob)` at a correct peer must be preceded by a `broadcast(blob)` event at some (possibly distinct) peer; +* *No duplication (optional, yet desirable)*. For any two events `broadcast(blob)` and `broadcast(blob')`, when two events `deliver(seqno0, prevhash0, blob)` and `deliver(seqno1, prevhash1, blob')` occur at correct peers and `blob == blob'`, then `seqno0==seqno1` and `prevhash0==prevhash1`. +* *Liveness*. If a correct peer invokes an event `broadcast(blob)` then every correct peer "eventually" issues an event `deliver(*, *, blob)`, where `*` denotes an arbitrary value. + + +## 2. Basic workflow of transaction endorsement + +In the following we outline the high-level request flow for a transaction. + +**Remark:** *Notice that the following protocol _does not_ assume that all transactions are deterministic, i.e., it allows for non-deterministic transactions.* + +### 2.1. The client creates a transaction and sends it to a submitting peer of its choice + +To invoke a transaction, the client sends the following message to a submitting peer `spID`. + +`<SUBMIT,tx,retryFlag>`, where + +- `tx=<clientID,chaincodeID,txPayload,clientSig>`, where + - `clientID` is an ID of the submitting client, + - `chaincodeID` refers to the chaincode to which the transaction pertains, + - `txPayload` is the payload containing the submitted transaction itself, + - `clientSig` is the signature of the client on the other fields of `tx`. +- `retryFlag` is a boolean that tells the submitting peer whether to retry the submission of the transaction in case the transaction fails. + +The details of `txPayload` will differ between invoke transactions and +deploy transactions (i.e., invoke transactions referring to +a deploy-specific system chaincode).
For an **invoke transaction**, +`txPayload` would consist of one field + +- `invocation = `, where + - `operation` denotes the chaincode operation (function) and arguments, + - `metadata` denotes attributes related to the invocation. + +For a **deploy transaction**, `txPayload` would consist of two fields + +- `chainCode = `, where + - `source` denotes the source code of the chaincode, + - `metadata` denotes attributes related to the chaincode and application, +- `policies` contains policies related to the chaincode that are accessible to all peers, such as the endorsement policy + +**TODO:** Decide whether to include explicitly local/logical time at the client (a timestamp). + +### 2.2. The submitting peer prepares a transaction and sends it to endorsers for obtaining an endorsement + +On reception of a `` message from a client, the submitting peer first verifies the client's signature `clientSig` and then prepares a transaction. This involves submitting peer tentatively *executing* a transaction (`txPayload`), by invoking the chaincode to which the transaction refers (`chaincodeID`) and the copy of the state that the submitting peer locally holds. + +As a result of the execution, the submitting peer computes a _state update_ (`stateUpdate`) and _version dependencies_ (`verDep`), also called *MVCC+postimage info* in DB language. + +Recall that the state consists of key/value (k/v) pairs. All k/v entries are versioned, that is, every entry contains ordered version information, which is incremented every time when the value stored under a key is updated. The peer that interprets the transaction records all k/v pairs accessed by the chaincode, either for reading or for writing, but the peer does not yet update its state. More specifically: + +* `verDep` is a tuple `verDep=(readset,writeset)`. Given state `s` before a submitting peer executes a transaction: + * for every key `k` read by the transaction, pair `(k,s(k).version)` is added to `readset`. + * for every key `k` modified by the transaction, pair `(k,s(k).version)` is added to `writeset`. +* additionally, for every key `k` modified by the transaction to the new value `v'`, pair `(k,v')` is added to `stateUpdate`. Alternatively, `v'` could be the delta of the new value to previous value (`s(k).value`). + +An implementation may combine `verDep.writeset` with `stateUpdate` into a single data structure. + +Then, `tran-proposal := (spID,chaincodeID,txContentBlob,stateUpdate,verDep)`, + +where `txContentBlob` is chaincode/transaction specific information. The intention is to have `txContentBlob` used as some representation of `tx` (e.g., `txContentBlob=tx.txPayload`). More details are given in Section 6. + +Cryptographic hash of `tran-proposal` is used by all nodes as a unique transaction identifier `tid` (i.e., `tid=HASH(tran-proposal)`). + +The submitting peer then sends the transaction (i.e., `tran-proposal`) to the endorsers for the chaincode concerned. The endorsing peers are selected according to the interpretation of the policy and the availability of peers, known by the peers. For example, the transaction could be sent to *all* endorsers of a given `chaincodeID`. That said, some endorsers could be offline, others may object and choose not to endorse the transaction. The submitting peer tries to satisfy the policy expression with the endorsers available. 
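+
+To make the mechanics of this section concrete, the following is a minimal, illustrative sketch (all type and function names are hypothetical and not part of the fabric code base) of how a simulation could record `readset`, `writeset` and `stateUpdate` against a versioned KVS, and derive `tid` as a hash over the proposal data:
+
+```
+package main
+
+import (
+    "crypto/sha256"
+    "fmt"
+)
+
+// versionedValue mirrors the (value, version) pairs of the KVS state model in Section 1.2.
+type versionedValue struct {
+    value   []byte
+    version uint64
+}
+
+// kvVersion records a key together with the version observed during simulation.
+type kvVersion struct {
+    key     string
+    version uint64
+}
+
+// verDep captures the MVCC-style version dependencies of a simulated transaction.
+type verDep struct {
+    readset  []kvVersion
+    writeset []kvVersion
+}
+
+// simulate runs a chaincode operation against a local copy of the state and records
+// readset, writeset and stateUpdate without modifying the state itself.
+func simulate(state map[string]versionedValue, op func(get func(string) []byte, put func(string, []byte))) (verDep, map[string][]byte) {
+    dep := verDep{}
+    stateUpdate := map[string][]byte{}
+    get := func(k string) []byte {
+        dep.readset = append(dep.readset, kvVersion{k, state[k].version})
+        return state[k].value
+    }
+    put := func(k string, v []byte) {
+        dep.writeset = append(dep.writeset, kvVersion{k, state[k].version})
+        stateUpdate[k] = v // the new value; a delta against s(k).value could be stored instead
+    }
+    op(get, put)
+    return dep, stateUpdate
+}
+
+func main() {
+    state := map[string]versionedValue{"balance": {value: []byte("100"), version: 7}}
+    dep, upd := simulate(state, func(get func(string) []byte, put func(string, []byte)) {
+        _ = get("balance")
+        put("balance", []byte("90"))
+    })
+    // tid is a hash over the proposal contents; the serialization here is purely illustrative.
+    tid := sha256.Sum256([]byte(fmt.Sprintf("%v/%v", dep, upd)))
+    fmt.Printf("readset=%v writeset=%v stateUpdate=%v tid=%x\n", dep.readset, dep.writeset, upd, tid[:4])
+}
+```
+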
+ +The submitting peer `spID` sends the transaction to an endorsing peer `epID` using the following message: + +`<PROPOSE,tx,tran-proposal>` + +**Possible optimization:** An implementation may optimize duplication of `chaincodeID` in `tx.chaincodeID` and `tran-proposal.chaincodeID`, as well as possible duplication of `txPayload` in `tx.txPayload` and `tran-proposal.txContentBlob`. + +Finally, the submitting peer stores `tran-proposal` and `tid` in memory and waits for responses from endorsing peers. + +**Alternative design:** *As described here the submitting peer communicates directly with the endorsers. This could also be a function performed by the consensus service; in this case it should be determined whether the fabric has to follow the atomic broadcast delivery guarantee for this or use simple peer-to-peer communication. In that case the consensus service would also be responsible for collecting the endorsements according to the policy and for returning them to the submitting peer.* + +**TODO:** Decide on communication between submitting peers and endorsing peers: peer-to-peer or using the consensus service. + +### 2.3. An endorser receives and endorses a transaction + +When a transaction is delivered to a connected endorsing peer for the chaincode `tran-proposal.chaincodeID` by means of a `PROPOSE` message, the endorsing peer performs the following steps: + +* The endorser verifies `tx.clientSig` and ensures `tx.chaincodeID==tran-proposal.chaincodeID`. + +* The endorser simulates the transaction (using `tx.txPayload`) and verifies that the state update and dependency information are correct. If everything is valid, the peer digitally signs the statement `(TRANSACTION-VALID, tid)` producing signature `epSig`. The endorsing peer then sends a `<TRANSACTION-VALID, tid, epSig>` message to the submitting peer (`tran-proposal.spID`). + +* Else, in case the transaction simulation at the endorser fails to reproduce the results in `tran-proposal`, we distinguish the following cases: + + a. if the endorser obtains different state updates than those in `tran-proposal.stateUpdates`, the peer signs a statement `(TRANSACTION-INVALID, tid, INCORRECT_STATE)` and sends the signed statement to the submitting peer. + + b. if the endorser is aware of more advanced data versions than those referred to in `tran-proposal.verDeps`, it signs a statement `(TRANSACTION-INVALID, tid, STALE_VERSION)` and sends the signed statement to the submitting peer. + + c. if the endorser does not want to endorse a transaction for any other reason (internal endorser policy, error in a transaction, etc.) it signs a statement `(TRANSACTION-INVALID, tid, REJECTED)` and sends the signed statement to the submitting peer. + +Notice that an endorser does not change its state in this step; the updates are not logged! + +**Alternative design:** *An endorsing peer may omit to inform the submitting peer about an invalid transaction altogether, without sending explicit `TRANSACTION-INVALID` notifications.* + +**Alternative design:** *The endorsing peer submits the `TRANSACTION-VALID`/`TRANSACTION-INVALID` statement and signature to the consensus service for delivery.* + +**TODO:** Decide on alternative designs above. + +### 2.4. The submitting peer collects an endorsement for a transaction and broadcasts it through consensus + +The submitting peer waits until it receives enough messages and signatures on `(TRANSACTION-VALID, tid)` statements to conclude that the transaction proposal is endorsed (including possibly its own signature). This will depend on the chaincode endorsement policy (see also Section 3).
If the endorsement policy is satisfied, the transaction has been *endorsed*; note that it is not yet committed. The collection of signatures from endorsing peers which establishes that a transaction is endorsed is called an *endorsement*; the peer stores these signatures in `endorsement`. + +If the submitting peer does not manage to collect an endorsement for a transaction proposal, it abandons this transaction and notifies the submitting client. If `retryFlag` has been originally set by the submitting client (see step 1 and the `SUBMIT` message) the submitting peer may (depending on submitting peer policies) retry the transaction (from step 2). + +For a transaction with a valid endorsement, we now start using the consensus fabric. The submitting peer invokes the consensus service using `broadcast(blob)`, where `blob=(tran-proposal, endorsement)`. + +### 2.5. The consensus service delivers transactions to the peers + +When an event `deliver(seqno, prevhash, blob)` occurs and a peer has applied all state updates for blobs with sequence number lower than `seqno`, a peer does the following: + +* It checks that the `blob.endorsement` is valid according to the policy of the chaincode (`blob.tran-proposal.chaincodeID`) to which it refers. (This step might be performed even without waiting for applying the state updates with sequence numbers smaller than `seqno`.) + +* It verifies that the dependencies (`blob.tran-proposal.verDep`) have not been violated meanwhile. + +Verification of dependencies can be implemented in different ways, according to a consistency property or "isolation guarantee" that is chosen for the state updates. For example, **serializability** can be provided by requiring the version associated with each key in the `readset` or `writeset` to be equal to that key's version in the state, and rejecting transactions that do not satisfy this requirement. As another example, one can provide **snapshot isolation** when all keys in the `writeset` still have the same version in the state as in the dependency data. The database literature contains many more isolation guarantees. + +**TODO:** Decide whether to insist on serializability or allow chaincode to specify isolation level. + +* If all these checks pass, the transaction is deemed *valid* or *committed*. This means that a peer appends the transaction to the ledger and subsequently applies `blob.tran-proposal.stateUpdates` to blockchain state. Only committed transactions may change the state. + +* If any of these checks fail, the transaction is invalid and the peer drops the transaction. It is important to note that invalid transactions are not committed, do not change the state, and are not recorded. + +Additionally, the submitting peer notifies the client of a dropped transaction. If `retryFlag` has been originally set by the submitting client (see step 1 and the `SUBMIT` message) the submitting peer may (depending on submitting peer policies) retry the transaction (from step 2). + +![Illustration of the transaction flow (common-case path).](flow-2.png) + +Figure 1. Illustration of the transaction flow (common-case path). + +--- + +## 3. Endorsement policies + +### 3.1. Endorsement policy specification + +An **endorsement policy** is a condition on what _endorses_ a transaction. An endorsement policy is specified by a `deploy` transaction that installs a specific chaincode. A transaction is declared valid only if it has been endorsed according to the policy.
An invoke transaction for a chaincode will first have to obtain an *endorsement* that satisfies the chaincode's policy or it will not be committed. This takes place through the interaction between the submitting peer and endorsing peers as explained in Section 2. + +Formally the endorsement policy is a predicate on the transaction, endorsement, and potentially further state that evaluates to TRUE or FALSE. For deploy transactions the endorsement is obtained according to a system-wide policy (for example, from the system chaincode). + +Formally an endorsement policy is a predicate referring to certain variables. Potentially it may refer to: + +1. keys or identities relating to the chaincode (found in the metadata of the chaincode), for example, a set of endorsers; +2. further metadata of the chaincode; +3. elements of the transaction itself; +4. and potentially more. + +The evaluation of an endorsement policy predicate must be deterministic. Endorsement policies must not be complex and cannot be "mini chaincode". The endorsement policy specification language must be limited and enforce determinism. + +The list is ordered by increasing expressiveness and complexity, that is, it will be relatively simple to support policies that only refer to keys and identities of nodes. + +**TODO:** Decide on parameters of the endorsement policy. + +The predicate may contain logical expressions and evaluates to TRUE or FALSE. Typically the condition will use digital signatures on the transaction invocation issued by endorsing peers for the chaincode. + +Suppose the chaincode specifies the endorser set `E = {Alice, Bob, Charlie, Dave, Eve, Frank, George}`. Some example policies: + +- A valid signature from all members of E. + +- A valid signature from any single member of E. + +- Valid signatures from endorsing peers according to the condition + `(Alice OR Bob) AND (any two of: Charlie, Dave, Eve, Frank, George)`. + +- Valid signatures by any 5 out of the 7 endorsers. (More generally, for chaincode with `n > 3f` endorsers, valid signatures by any `2f+1` out of the `n` endorsers, or by any group of *more* than `(n+f)/2` endorsers.) + +- Suppose there is an assignment of "stake" or "weights" to the endorsers, + like `{Alice=49, Bob=15, Charlie=15, Dave=10, Eve=7, Frank=3, George=1}`, + where the total stake is 100: The policy requires valid signatures from a + set that has a majority of the stake (i.e., a group with combined stake + strictly more than 50), such as `{Alice, X}` with any `X` different from + George, or `{everyone together except Alice}`. And so on. + +- The assignment of stake in the previous example condition could be static (fixed in the metadata of the chaincode) or dynamic (e.g., dependent on the state of the chaincode and be modified during the execution). + +How useful these policies are will depend on the application, on the desired resilience of the solution against failures or misbehavior of endorsers, and on various other properties. + + +### 3.2. Implementation + +Typically, endorsement policies will be formulated in terms of signatures required from endorsing peers. The metadata of the chaincode must contain the corresponding signature verification keys. + +An endorsement will typically consist of a set of signatures. It can be evaluated locally by every peer or by every consenter node with access to the chaincode metadata (which includes these keys), such that this node does *not* require interaction with another node. 
Neither does a node need access to the state for verifying an endorsement. + +Endorsements that refer to other metadata of the chaincode can be evaluated in the same way. + +**TODO:** Formalize endorsement policies and design an implementation. + + + +--- + +## 4. Blockchain data structures + +The blockchain consists of three data structures: a) *raw ledger*, b) *blockchain state*, and c) *validated ledger*. Blockchain state and validated ledger are maintained for efficiency - they can be derived from the raw ledger. + +* *Raw ledger (RL)*. The raw ledger contains all data output by the consensus service at peers. It is a sequence of `deliver(seqno, prevhash, blob)` events, which form a hash chain according to the computation of `prevhash` described before. The raw ledger contains information about both *valid* and *invalid* transactions and provides a verifiable history of all successful and unsuccessful state changes and attempts to change state, occurring during the operation of the system. + + The RL allows peers to replay the history of all transactions and to reconstruct the blockchain state (see below). It also provides the submitting peer with information about *invalid* (uncommitted) transactions, on which submitting peers can act as described in Section 2.5. + + +* *(Blockchain) state*. The state is maintained by the peers (in the form of a KVS) and is derived from the raw ledger by **filtering out invalid transactions** as described in Section 2.5 (Step 5 in Figure 1) and applying valid transactions to state (by executing `put(k,v)` for every `(k,v)` pair in `stateUpdate` or, alternatively, applying state deltas with respect to previous state). + + Namely, by the guarantees of consensus, all correct peers will receive an identical sequence of `deliver(seqno, prevhash, blob)` events. As the evaluation of the endorsement policy and evaluation of version dependencies of a state update (Section 2.5) are deterministic, all correct peers will also come to the same conclusion whether a transaction contained in a blob is valid. Hence, all peers commit and apply the same sequence of transactions and update their state in the same way. + +* *Validated ledger (VL)*. To maintain the abstraction of a ledger that contains only valid and committed transactions (that appears in Bitcoin, for example), peers may, in addition to state and raw ledger, maintain the *validated ledger*. This is a hash chain derived from the raw ledger by filtering out invalid transactions. + + +### 4.1. Batch and block formation + +Instead of outputting individual transactions (blobs), the consensus service may output *batches* of blobs. In this case, the consensus service must impose and convey a deterministic ordering of the blobs within each batch. The number of blobs in a batch may be chosen dynamically by a consensus implementation. + +Consensus batching does not impact the construction of the raw ledger, which remains a hash chain of blobs. But with batching, the raw ledger becomes a hash chain of batches rather than a hash chain of individual blobs. + +With batching, the construction of the (optional) validated ledger *blocks* proceeds as follows. As the batches in the raw ledger may contain invalid transactions (i.e., transactions with invalid endorsement or with invalid version dependencies), such transactions are filtered out by peers before a transaction in a batch becomes committed in a block. Every peer does this by itself.
A block is defined as a consensus batch without the invalid transactions, which have been filtered out. Such blocks are inherently dynamic in size and may be empty. An illustration of block construction is given in the figure below. + ![Illustration of validated ledger block formation from raw ledger batches.](blocks-2.png) + +Figure 2. Illustration of validated ledger block formation from raw ledger batches. + + +### 4.2. Chaining the blocks + +The batches of the raw ledger output by the consensus service form a hash chain, as described in Section 1.3.3. + +The blocks of the validated ledger are chained together into a hash chain by every peer. The valid and committed transactions from the batch form a block; all blocks are chained together to form a hash chain. + +More specifically, every block of a validated ledger contains: + +* The hash of the previous block. + +* Block number. + +* An ordered list of all valid transactions committed by the peers since the last block was computed (i.e., list of valid transactions in a corresponding batch). + +* The hash of the corresponding batch from which the current block is derived. + +All this information is concatenated and hashed by a peer, producing the hash of the block in the validated ledger. + +## 5. State transfer and checkpointing + +In the common case, during normal operation, a peer receives a sequence of `deliver()` events (containing batches of transactions) from the consensus service. It appends these batches to its raw ledger and updates the blockchain state and the validated ledger accordingly. + +However, due to network partitions or temporary outages of peers, a peer may miss several batches in the raw ledger. In this case, a peer must *transfer state* of the raw ledger from other peers in order to catch up with the rest of the network. This section describes a way to implement this. + +### 5.1. Raw ledger state transfer (batch transfer) + +To illustrate how basic *state transfer* works, assume that the last batch in a local copy of the raw ledger at a peer *p* has sequence number 25 (i.e., `seqno` of the last received `deliver()` event equals `25`). After some time peer *p* receives a `deliver(seqno=54,hash53,blob)` event from the consensus service. + +At this moment, peer *p* realizes that it misses batches 26-53 in its copy of the raw ledger. To obtain the missing batches, *p* resorts to peer-to-peer communication with other peers. It asks other peers to return the missing batches. While it transfers the missing state, *p* continues listening to the consensus service for new batches. + +Notice that *p* *does not need to trust any of its peers* from which it obtains the missing batches via state transfer. As *p* has the hash of batch *53* (namely, *hash53*), which *p* trusts since it obtained that directly from the consensus service, *p* can verify the integrity of the missing batches, once all of them have arrived. The verification checks that they form a proper hash chain. + +As soon as *p* has obtained and verified the missing batches 26-53, it can proceed to the steps of Section 2.5 for each of the batches 26-54. From this *p* then constructs the blockchain state and the validated ledger. + +Notice that *p* can start to speculatively reconstruct the blockchain state and the validated ledger as soon as it receives batches with lower sequence numbers, even if it still misses some batches with higher sequence numbers.
However, before externalizing the state and committing blocks to the validated ledger, *p* must complete the state transfer of all missing batches (in our example, up to batch 53, inclusive) and the processing of individual transferred batches as described in Section 2.5. + + +### 5.2. Checkpointing + +The raw ledger contains invalid transactions, which may not necessarily be recorded forever. However, peers cannot simply discard raw-ledger batches and thereby prune the raw ledger once they establish the corresponding validated-ledger blocks. Namely, in this case, if a new peer joins the network, other peers could not transfer the discarded batches (in the raw ledger) to the joining peer, nor convince the joining peer of the validity of their blocks (of the validated ledger). + +To facilitate pruning of the raw ledger, this document describes a *checkpointing* mechanism. This mechanism establishes the validity of the validated-ledger blocks across the peer network and allows checkpointed validated-ledger blocks to replace the discarded raw-ledger batches. This, in turn, reduces storage space, as there is no need to store invalid transactions. It also reduces the work to reconstruct the state for new peers that join the network (as they do not need to establish validity of individual transactions when reconstructing the state from the raw ledger, but simply replay the state updates contained in the validated ledger). + +Notice that checkpointing facilitates pruning of the raw ledger and, as such, it is only a performance optimization. Checkpointing is not necessary to make the design correct. + +#### 5.2.1. Checkpointing protocol + +Checkpointing is performed periodically by the peers every *CHK* blocks, where *CHK* is a configurable parameter. To initiate a checkpoint, the peers broadcast (e.g., gossip) to other peers the message `<CHECKPOINT,blocknohash,blockno,peerSig>`, where `blockno` is the current block number and `blocknohash` is its respective hash, and `peerSig` is the peer's signature on `(CHECKPOINT,blocknohash,blockno)`, referring to the validated ledger. + +A peer collects `CHECKPOINT` messages until it obtains enough correctly signed messages with matching `blockno` and `blocknohash` to establish a *valid checkpoint* (see Section 5.2.2.). + +Upon establishing a valid checkpoint for block number `blockno` with `blocknohash`, a peer: + +* if `blockno>latestValidCheckpoint.blockno`, then the peer assigns `latestValidCheckpoint=(blocknohash,blockno)`, +* stores the set of respective peer signatures that constitute a valid checkpoint into the set `latestValidCheckpointProof`, +* (optionally) prunes its raw ledger up to batch number `blockno` (inclusive). + +#### 5.2.2. Valid checkpoints + +Clearly, the checkpointing protocol raises the following questions: *When can a peer prune its raw ledger? How many `CHECKPOINT` messages are "sufficiently many"?* This is defined by a *checkpoint validity policy*, with (at least) two possible approaches, which may also be combined: + +* *Local (peer-specific) checkpoint validity policy (LCVP).* A local policy at a given peer *p* may specify a set of peers which peer *p* trusts and whose `CHECKPOINT` messages are sufficient to establish a valid checkpoint. For example, LCVP at peer *Alice* may define that *Alice* needs to receive a `CHECKPOINT` message from *Bob*, or from *both* *Charlie* and *Dave*. + +* *Global checkpoint validity policy (GCVP).* A checkpoint validity policy may be specified globally.
This is similar to a local peer policy, except that it is stipulated at the system (blockchain) granularity, rather than peer granularity. For instance, GCVP may specify that: + * each peer may trust a checkpoint if confirmed by *7* different peers. + * in a deployment where every consenter is also a peer and where up to *f* consenters may be (Byzantine) faulty, each peer may trust a checkpoint if confirmed by *f+1* different consenters. + + + +####5.2.3. Validated ledger state transfer (block transfer) + +Besides facilitating the pruning of the raw ledger, checkpoints enable state transfer through validated-ledger block transfers. These can partially replace raw-ledger batch transfers. + +Conceptually, the block transfer mechanism works similarly to batch transfer. Recall our example in which a peer *p* misses batches 26-53, and is transferring state from a peer *q* that has established a valid checkpoint at block 50. State transfer then proceeds in two steps: + +* First, *p* tries to obtain the validated ledger up to the checkpoint of peer *q* (at block 50). To this end, *q* sends to *p* its local (`latestValidCheckpoint,latestValidCheckpointProof`) pair. In our example `latestValidCheckpoint=(hash50,block50)`. If `latestValidCheckpointProof` satisfies the checkpoint validity policy at peer *p*, then the transfer of the blocks 26 to 50 is possible. Otherwise, peer *q* cannot convince *p* that its local checkpoint is valid. Peer *p* might then choose to proceed with raw-ledger batch transfer (Section 5.1). + +* If the transfer of blocks 26-50 was successful, peer *p* still needs to complete state transfer by fetching blocks 51-53 of the validated ledger or batches 51-53 of the raw ledger. To this end, *p* can simply follow the Raw-ledger batch transfer protocol (Section 5.1), transferring these from *q* or any other peer. Notice that the validated-ledger blocks contain hashes of respective raw-ledger batches (Section 4.2). Hence the batch transfer of the raw ledger can be done even if peer *p* does not have batch 50 in its Raw Ledger, as hash of batch 50 is included in block 50. + + + +## 6. Confidentiality + +This section explains how this architecture facilitates the deployment of +chaincodes that involve processing of sensitive data that must be kept +confidential from certain peers. + +**Fabric-level confidentiality policies.** In a nutshell, this architecture +offers certain confidentiality features at the *fabric* layer, where: + + * endorsers of a confidential chaincode have access to the plaintext of: + * the chaincode deploy transaction payload; + * the chaincode invoke transaction payload; + * the chaincode state and state updates; and + * other peers are prevented from accessing plaintext of this information. + +Here we make the assumption that the endorsers included in the endorsement set +of a confidential chaincode are trusted by the chaincode creator to access the +resources of a chaincode and to maintain their confidentiality. + +The degree to which a chaincode employs the fabric-confidentiality features +is defined in a **confidentiality policy** specified by the deployer at +deployment time. 
+ +In particular, the fabric offers support for the following confidentiality +policies: + +| Policy ID | Deploy Payload | Invoke Payload | State Updates | +|---|---|---|---| +| Policy `000` | In-the-clear | In-the-clear | In-the-clear | +| Policy `010` | In-the-clear | Confidential | In-the-clear | +| Policy `011` | In-the-clear | Confidential | Confidential | +| Policy `110` | Confidential | Confidential | In-the-clear | +| Policy `111` | Confidential | Confidential | Confidential | +| Policy* `0xx` | In-the-clear | any | any | +| Policy* `1xx` | Confidential | any | any | + +**Table 6.1.** Fabric confidentiality policies. + +*Confidential* means that access to the cleartext of the corresponding +transaction component (i.e., deploy/invoke payload or state updates) is restricted to the endorsing peers of the +transaction. +*In-the-clear* means that the corresponding transaction component can be +read and accessed by all peers. *Any* denotes either *Confidential* or +*In-the-clear*. + +In the following we assume that the payload of a deploy transaction +includes both code and application data/metadata as a single item. However, +in future revisions of this design, we may treat the two independently from a +confidentiality policy perspective. + +In the rest of this text, a `Confidential` *chaincode* is one with a +confidentiality policy different from `000`. Also, in the rest of +this text, we make the assumption that every peer is associated with an +enrollment (encryption) public key, as described in the +[Hyperledger fabric Protocol specification](https://github.com/hyperledger/fabric/blob/master/docs/protocol-spec.md). In particular, for every endorser `e` a public key `ePubKey` is known to all +peers and peer `e` knows the corresponding private key for decryption. + +**Disclaimers:** + +*Hiding the transaction activity w.r.t. a chaincode*. +It is important to note that the design does not hide the +identifiers of the chaincode for which transactions are executed nor does +it hide which parts of chaincode state are updated by a transaction. +In other words, transactions and state are encrypted but activity and state +changes can be linked by the committing peers. With respect to third parties, +the committing peers are permitted by the chaincode creators to notice activity +of a chaincode and trusted not to leak this information. +However, we aim to remedy this in a revised version of this design. + +*Granularity of the confidentiality of the state.* This design currently +treats a chaincode and its state as one confidentiality domain, not +distinguishing different key/value entries. It is possible to assign +different confidentiality policies to different parts of the state at the +application layer, supporting goals such as making some parts of the state +visible to a subset of the clients while keeping other parts hidden. That is, such +guarantees are not prevented by the architecture and can already be implemented +at the application level, using adequate cryptographic tools in one or more +chaincodes. This would result in *chaincode-level confidentiality*. +Other requirements, such as hiding the identities of endorsers and/or the +endorsement policy from the committing peers, will be tackled in a future iteration +of this design.
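+
+As a small, illustrative sketch (the type and constant names below are hypothetical, not fabric APIs), one row of Table 6.1 could be represented as a triple of per-component visibility flags and rendered back to its three-character policy identifier:
+
+```
+package main
+
+import "fmt"
+
+// visibility of one transaction component, per Table 6.1
+type visibility bool
+
+const (
+    inTheClear   visibility = false
+    confidential visibility = true
+)
+
+// confidentialityPolicy mirrors one row of Table 6.1: one flag per component.
+type confidentialityPolicy struct {
+    deployPayload visibility
+    invokePayload visibility
+    stateUpdates  visibility
+}
+
+// id renders the policy as the three-bit identifier used in Table 6.1.
+func (p confidentialityPolicy) id() string {
+    bit := func(v visibility) byte {
+        if v == confidential {
+            return '1'
+        }
+        return '0'
+    }
+    return string([]byte{bit(p.deployPayload), bit(p.invokePayload), bit(p.stateUpdates)})
+}
+
+func main() {
+    // Policy 011: deploy payload in the clear, invoke payload and state updates confidential.
+    p := confidentialityPolicy{deployPayload: inTheClear, invokePayload: confidential, stateUpdates: confidential}
+    fmt.Println("policy", p.id())
+}
+```
+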
+ + + +### 6.1 Confidential chaincode deployment + +#### 6.1.1 Creating a deploy transaction + +To deploy chaincode `cc` with confidentiality support, the client that +deploys `cc` (the *deployer* of `cc`) specifies: + +* the chaincode itself, including the source code, and metadata associated to + it, subsumed in `chainCode`; +* the policies accompanying the endorsement of transactions associated to that + chaincode, i.e., + - an endorsement policy, denoted `ccEndorsementPolicy`; + - a set of endorsers, denoted `ccEndorserSet`; + - a confidentiality policy `ccConfidentialityPolicy` for the chaincode, + specifying confidentiality levels as described in Table 6.1.; +* cryptographic material associated to the chaincode, i.e., an asymmetric + encryption key pair `ccPubKey/ccPrivKey`, intended to provide confidentiality + of the transactions, state, and state updates of this particular chaincode. + +This information is incorporated into a (deploy) transaction `tx` that is +subsequently included in a `SUBMIT` message passed to the submitting peer. +The client constructs `txPayload` of the deploy transaction of `cc` as follows. + +The deployer first sets `txPayload.policies` to +<`ccEndorsementPolicy`, `ccEndorserSet`, `ccConfidentialityPolicy`>. + +To fill in `txPayload.payload`, it first checks if `ccConfidentialityPolicy` +specifies `Deploy Payload = Confidential`. If so, then the deployer encrypts +`chainCode` using `ccPubKey`, so that the chaincode and its metadata will +only be accessible by peers entitled to see them. That is, +`txPayload.chainCode := Enc(ccPubKey, chainCode)`. + +Furthermore, the chaincode-specific decryption key `ccPrivKey` is +distributed to all endorsing peers of chaincode `cc` (i.e., endorsers in +`ccEndorserSet`). This is done by wrapping `ccPrivKey` under the public key +of `e` for every endorser `e` in `ccEndorserSet`, +`wrappedKey_e := Enc(ePubKey, ccPrivKey)`, where `ePubKey` is the +enrollment public key of `e`. Thus, the deployer creates an additional field +`txPayload.endorserMessages := ccEndorserMessages`, where `ccEndorserMessages` +includes `wrappedKey_e` for all `e` in `ccEndorserSet`. + +We emphasize that a deploy transaction may include more fields, that are +intentionally omitted here in favor of presentation simplicity. +In addition, for sending the wrapped chaincode-key to all endorsers, and for +actually encrypting `chainCode` hybrid encryption schemes could be used to +achieve better performance. More details can be provided in future versions of +this document. + +Chaincode `cc` is assigned an identifier, hereafter referred to as +`chaincodeID`, as described in Section 2.2, for instance, as a hash of the +deploy transaction `tx`. We assume here that it is unique per chaincode and +it may become known to all peers. + +**Endorsement of Deploy Transaction:** Recall that every deploy transaction is +treated as an invoke transaction of a system chaincode handling chaincode +deployments; let this chaincode be denoted by `dsc`, and the respective +endorsement policy and set of endorsers are `dscEndorsementPolicy` and +`dscEndorserSet`. This means that the deploy transaction of any chaincode, +needs to be endorsed according to `dscEndorsementPolicy`. + +**TODO:** Consider whether for confidential chaincodes, the endorsement +policy of deploy transaction itself should satisfy the endorsement policy +of the chaincode being deployed, that is, whether the deploy transaction of +`cc` should also satisfy `ccEndorsementPolicy`. 
Alternatively, see if it makes +sense to split a deploy transaction into two parts, e.g., a `deploy` (that only states the deploy info) and an `install` that sets up the running chaincode. + + +#### 6.1.2 Peer processes a deploy transaction + +When a peer `e` commits a deploy transaction for chaincode `cc`, then it +has at least access to `chaincodeID` and to `txPayload.policies`, which contains `ccConfidentialityPolicy`, `ccEndorsementPolicy`, and `ccEndorserSet`. + +If `e` is also an endorser for `cc` then in addition `e` obtains the following +values: + +* the secret decryption key of the chaincode, as + `ccPrivKey := Dec(ePrivKey, wrappedKey_e)`; +* the cleartext of the deployment payload, as + `chainCode := Dec(ccPrivKey, txPayload.chainCode)`, if `Deploy Payload = Confidential`. + +Given the plaintext deployment payload `chainCode`, and prior to actually +installing it, the peer performs a consistency check as described below. +The peer then uses the cleartext `chainCode` in the remainder of the +deployment procedure, as described in Section 2. + + +**Consistency:** For deploying a chaincode, the protocol should ensure that +every endorser `e` will actually install and run the same code, even if the +creator of the chaincode (the peer that submits the deploy transaction) +would intend to make endorsers run different code. To this effect, `e` +should run a verification step during the execution of the deploy +transaction, which ensures, roughly, the following. The deployment +transaction either *succeeds* and the peer outputs the chaincode `cc` to be +executed or the transaction *fails* and outputs a corresponding error. The +deployment ensures the following condition: if the deployment succeeds for +two distinct [correct, non-faulty] endorsers, then they both deploy the +same chaincode. This condition is equivalent to the *Consistency* property +of a Byzantine Consistent Broadcast [[CGR11; Sec. 3.10]](http://distributedprogramming.net). + +Since every endorser executes the deployment transaction as a result of +obtaining it from the consensus service, all endorsers receive the same +`chainCode` blob, which may contain encryptions. But when an +endorser decrypts that with its own key, it does not automatically +guarantee that the resulting code of `cc` is the same for every other +endorser. + +This can be addressed in multiple ways: one solution would be to use a +specialized multi-receiver encryption scheme that includes randomness in +the ciphertext. Alternatively, verifiable encryption can be used with a +zero-knowledge proof that the plaintexts for all receivers are equal (this +seems less efficient than the first option). In any case, this additional data +that ensures the consistency condition must be included by the deployer in +the deploy transaction `tx`. + +**TODOs:** Many details are not yet addressed, such as: + +* Give more information on the implementation of `dsc` (perhaps in a different + section). More specifically, information on: + - the `dsc` code itself, and its `dscEndorsementPolicy` implementation (ref. Section 2.4) + - details of `tran-proposal` of `dsc`. Perhaps in a separate section? +* Consider alternative implementations for the identifier of the deployed + chaincode (`chaincodeID`). +* Describe how to react upon detecting a violation of the consistency + property above, for example, because the creator provided the + wrong wrapped keys to endorsers.
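+
+As an illustrative sketch of the key wrapping used above (this is not the fabric implementation; a random 32-byte secret stands in for the chaincode key material, which in practice would be distributed via hybrid encryption as noted in Section 6.1.1), a deployer can wrap the chaincode key under an endorser's enrollment public key, and only that endorser can unwrap it:
+
+```
+package main
+
+import (
+    "bytes"
+    "crypto/rand"
+    "crypto/rsa"
+    "crypto/sha256"
+    "fmt"
+)
+
+func main() {
+    // Enrollment key pair of one endorser e (ePubKey / ePrivKey in the text).
+    ePriv, err := rsa.GenerateKey(rand.Reader, 2048)
+    if err != nil {
+        panic(err)
+    }
+    ePub := &ePriv.PublicKey
+
+    // Chaincode key material to be shared with the endorsers of cc (stands in for ccPrivKey).
+    ccKey := make([]byte, 32)
+    if _, err := rand.Read(ccKey); err != nil {
+        panic(err)
+    }
+
+    // Deployer side: wrappedKey_e := Enc(ePubKey, ccPrivKey)
+    wrappedKeyE, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, ePub, ccKey, nil)
+    if err != nil {
+        panic(err)
+    }
+
+    // Endorser side: ccPrivKey := Dec(ePrivKey, wrappedKey_e)
+    unwrapped, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, ePriv, wrappedKeyE, nil)
+    if err != nil {
+        panic(err)
+    }
+    fmt.Println("endorser recovered the chaincode key:", bytes.Equal(ccKey, unwrapped))
+}
+```
+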
+ + +### 6.2 Confidential chaincode invocation + +Invoke transactions for a confidential chaincode will comply with the +*static* confidentiality policy that has been specified at deployment time (in +`ccConfidentialityPolicy`). Future revisions may consider +possibilities of a *dynamic* confidentiality policy that may evolve during +the runtime of the system. + +A confidential chaincode is invoked similarly to any other chaincode. +The difference is that here a submitting peer of a transaction pertaining +to a confidential chaincode needs to be an endorser for that chaincode. This +means it has access to the keys protecting that chaincode and its state. To +help maintain stateless clients, every peer knows which peers are endorsers for +a given confidential chaincode (see Section 6.1, `ccEndorserSet`), and can +direct clients to an appropriate submitting peer. Hence, in the rest of this +section, we assume that a submitting peer is also an endorser. + + +#### 6.2.1 Creating and submitting a confidential transaction + +The client is aware of the chaincode for which it creates a transaction, +and of its endorsers. The `SUBMIT` message of a confidential transaction +invocation consists of the same fields as a non-confidential one (see Section 2.1.), +i.e., `<SUBMIT,tx,retryFlag>`, where +`tx=<clientID,chaincodeID,txPayload,clientSig>`, and where `clientID` is +some form of identification of the client at the fabric layer, e.g., a +transaction certificate, and `clientSig` is the signature of the client on the other +fields of `tx`. + +Notice that for security purposes, `tx` may also contain more fields, which +are intentionally omitted here in favor of presentation simplicity. + +The difference with respect to non-confidential transactions is as follows. +If the confidentiality policy associated with the chaincode (`ccConfidentialityPolicy`) +specifies +`Invoke Payload = Confidential`, then the client additionally encrypts +the invocation arguments and metadata, say `invocation`, with `ccPubKey` +into `txPayload.invocation`. That is, +`txPayload.invocation := Enc(ccPubKey, invocation)`. + +Again, hybrid encryption schemes can be used for better performance. +Also, keys defined at deployment time can be used to generate other keys, e.g., +the keys with which the state would need to be encrypted, to reduce the overall +number of keys that need to be managed/distributed. + +**TODO:** Optionally provide a customized encryption method that also +hides the chaincode identifier. + + +#### 6.2.2 Endorsing a confidential transaction + +Upon receiving and verifying `<SUBMIT,tx,retryFlag>`, for tentatively +executing the code associated to the transaction, +and for preparing the transaction to be sent to the consensus service, the +submitting peer first decrypts a confidential transaction payload. More +precisely, if `ccConfidentialityPolicy` specifies that `Invoke Payload` is +`Confidential`, the submitting peer first retrieves the corresponding +chaincode-specific decryption key `ccPrivKey` and then decrypts +`txPayload.invocation`. Recall that the submitting peer is assumed to be an endorser +for the chaincode. The peer, say `e`, may retrieve `ccPrivKey` from the deploy +transaction of the chaincode `chaincodeID` by unwrapping +`wrappedKey_e`. Then the peer computes `invocation := Dec(ccPrivKey, txPayload.invocation)`. + +With the operation and metadata in `invocation` the submitting peer tentatively executes the +transaction using its copy of the state for producing a transaction +proposal.
If the confidentiality policy of the chaincode specifies that +`State` is also `Confidential`, then the peer uses `ccPrivKey` to +access the state, decrypting state values while reading. + +Moreover, for the state updates when the confidentiality policy specifies that +`State = Confidential`, the submitting peer encrypts +new state values in `stateUpdates` using `ccPubKey`. With the state +in the form of key/value pairs, only the changed values are encrypted. +The version dependencies are not encrypted. + +The transaction-proposal from the submitting peer now consists of + +`tran-proposal := (spID, chaincodeID, txContentBlob, stateUpdates, verDep)`, + +where `txContentBlob` is some form of the invoke transaction `tx`, as submitted +by the client. + +The submitting peer creates a `PROPOSE` message to send to the rest of the endorsers +in the endorser set as before (Section 2.2): + +`<PROPOSE,tx,tran-proposal>`. + +Notice that it is imperative that endorsers check that the `chaincodeID` that appears +in `tx` is consistent with the content of `tran-proposal`. Endorsers follow +the same process as before to endorse a transaction. + +In summary, this mechanism ensures that in case the state is encrypted, the +endorsers for a chaincode have access to the state in the clear but other +peers do not. Once the consensus service has delivered a transaction to a peer and +`stateUpdates` has been applied, every peer updates its state. +Note that the endorsers of a chaincode also apply the updates and +transparently operate on the ciphertext; they only need access to the +plaintext again for endorsing the next transaction. + + +**TODO:** Revisit Section 4 and describe in more detail which parts are +added to the ledger. diff --git a/proposals/r1/Next-Ledger-Architecture-Proposal.md b/proposals/r1/Next-Ledger-Architecture-Proposal.md new file mode 100644 index 00000000000..b2894557e92 --- /dev/null +++ b/proposals/r1/Next-Ledger-Architecture-Proposal.md @@ -0,0 +1,223 @@ +**Draft** / **Work in Progress** + +This page documents a proposal for a future ledger architecture based on community feedback. All input is welcome as the goal is to make this a community effort. + +##### Table of Contents +[Motivation](#motivation) +[API](#api) +[Point-in-Time Queries](#pointintime) +[Query Language](#querylanguage) + + +## Motivation +The motivation for exploring a new ledger architecture is based on community feedback. While the existing ledger is able to support some (but not all) of the below requirements, we wanted to explore what a new ledger would look like given all that has been learned. Based on many discussions in the community over Slack, GitHub, and the face-to-face hackathons, it is clear that there is a strong desire to support the following requirements: + +1. Point in time queries - The ability to query chaincode state at previous blocks and easily trace lineage **without** replaying transactions +2. SQL like query language +3. Privacy - The complete ledger may not reside on all committers +4. Cryptographically secure ledger - Data integrity without consulting other nodes +5. Support for consensus algorithms that provide immediate finality, like PBFT +6. Support for consensus algorithms that require stochastic convergence, like PoW and PoET +7. Pruning - Ability to remove old transaction data as needed. +8. Support separation of endorsement from consensus as described in the [Next Consensus Architecture Proposal](https://github.com/hyperledger/fabric/wiki/Next-Consensus-Architecture-Proposal).
This implies that some peers may apply endorsed results to their ledger **without** executing transactions or viewing chaincode logic. +9. API / Enginer separation. The ability to plug in different storage engines as needed. + + +## API + +Proposed API in Go pseudocode + +``` +package ledger + +import "github.com/hyperledger/fabric/protos" + +// Encryptor is an interface that a ledger implementation can use for Encrypt/Decrypt the chaincode state +type Encryptor interface { + Encrypt([]byte) []byte + Decrypt([]byte) []byte +} + +// PeerMgmt is an interface that a ledger implementation expects from peer implementation +type PeerMgmt interface { + // IsPeerEndorserFor returns 'true' if the peer is endorser for given chaincodeID + IsPeerEndorserFor(chaincodeID string) bool + + // ListEndorsingChaincodes return the chaincodeIDs for which the peer acts as one of the endorsers + ListEndorsingChaincodes() []string + + // GetEncryptor returns the Encryptor for the given chaincodeID + GetEncryptor(chaincodeID string) (Encryptor, error) +} + +// In the case of a confidential chaincode, the simulation results from ledger are expected to be encrypted using the 'Encryptor' corresponding to the chaincode. +// Similarly, the blocks returned by the GetBlock(s) method of the ledger are expected to have the state updates in the encrypted form. +// However, internally, the ledger can maintain the latest and historical state for the chaincodes for which the peer is one of the endorsers - in plain text form. +// TODO - Is this assumption correct? + +// General purpose interface for forcing a data element to be serializable/de-serializable +type DataHolder interface { + GetData() interface{} + GetBytes() []byte + DecodeBytes(b []byte) interface{} +} + +type SimulationResults interface { + DataHolder +} + +type QueryResult interface { + DataHolder +} + +type BlockHeader struct { +} + +type PrunePolicy interface { +} + +type BlockRangePrunePolicy struct { + FirstBlockHash string + LastBlockHash string +} + +// QueryExecutor executes the queries +// Get* methods are for supporting KV-based data model. ExecuteQuery method is for supporting a rich datamodel and query support +// +// ExecuteQuery method in the case of a rich data model is expected to support queries on +// latest state, historical state and on the intersection of state and transactions +type QueryExecutor interface { + GetState(key string) ([]byte, error) + GetStateRangeScanIterator(startKey string, endKey string) (ResultsIterator, error) + GetStateMultipleKeys(keys []string) ([][]byte, error) + GetTransactionsForKey(key string) (ResultsIterator, error) + + ExecuteQuery(query string) (ResultsIterator, error) +} + +// TxSimulator simulates a transaction on a consistent snapshot of the as recent state as possible +type TxSimulator interface { + QueryExecutor + StartNewTx() + + // KV data model + SetState(key string, value []byte) + DeleteState(key string) + SetStateMultipleKeys(kvs map[string][]byte) + + // for supporting rich data model (see comments on QueryExecutor above) + ExecuteUpdate(query string) + + // This can be a large payload + CopyState(sourceChaincodeID string) error + + // GetTxSimulationResults encapsulates the results of the transaction simulation. 
+ // This should contain enough detail for + // - The update in the chaincode state that would be caused if the transaction is to be committed + // - The environment in which the transaction is executed so as to be able to decide the validity of the enviroment + // (at a later time on a different peer) during committing the transactions + // Different ledger implementation (or configurations of a single implementation) may want to represent the above two pieces + // of information in different way in order to support different data-models or optimize the information representations. + // TODO detailed illustration of a couple of representations. + GetTxSimulationResults() SimulationResults + HasConflicts() bool + Clear() +} + +type ResultsIterator interface { + // Next moves to next key-value. Returns true if next key-value exists + Next() bool + // GetKeyValue returns next key-value + GetResult() QueryResult + // Close releases resources occupied by the iterator + Close() +} + +// Ledger represents the 'final ledger'. In addition to implement the methods inherited from the BasicLedger, +// it provides the handle to objects for querying the chaincode state and executing chaincode transactions. +type ValidatedLedger interface { + Ledger + // NewTxSimulator gives handle to a transaction simulator for given chaincode and given fork (represented by blockHash) + // A client can obtain more than one 'TxSimulator's for parallel execution. Any synchronization should be performed at the + // implementation level if required + NewTxSimulator(chaincodeID string, blockHash string) (TxSimulator, error) + + // NewQueryExecuter gives handle to a query executer for given chaincode and given fork (represented by blockHash) + // A client can obtain more than one 'QueryExecutor's for parallel execution. Any synchronization should be performed at the + // implementation level if required + NewQueryExecuter(chaincodeID string, blockHash string) (QueryExecutor, error) + + // CheckpointPerformed is expected to be invoked by the consensus algorithm when it completes a checkpoint across peers + // On the invoke of this method, the block in the 'RawLedger' between the 'corresponding to the block currently checkpointed' and + // 'corresponding to the block checkpointed last time' can be pruned. + // (Pruning the raw blocks would need an additional time based factor as well, if forks are to be supported in the raw ledger.) + // (Does raw ledger in the case of a consensus that allow forks (e.g., PoW) make sense at all? Or practically, these consensus + // would always produce the final blocks that contains validated transactions). + CheckpointPerformed(blockHash string) + + RemoveInvalidTransactions(block *protos.Block) (*protos.Block, error) +} + +// RawLedger implements methods required by 'raw ledger' +// CommitBlock() of RawLedger is expected to be invoked by the consensus algorithm when a new block is constructed. +// Upon receiving the new block, it is expected to be 'processed' by the ledger - the processing includes - +// preserving the raw block, validating each transaction in the block, discarding the invalid transactions, +// preparing the final block with the rmaining (i.e. valid) transactions and committing the final block (including updating the state). +// The raw block should not be deleted as yet - until the corresponding 'final block' is included in one of the following checkpoint performed by the consensus. 
+type RawLedger interface {
+    Ledger
+}
+
+// Ledger captures the methods that are common across the 'raw ledger' and the 'final ledger'
+type Ledger interface {
+    // GetTopBlockHashes returns the hashes of the topmost block in each fork.
+    GetTopBlockHashes() []string
+    // CommitBlock adds a new block
+    CommitBlock(block *protos.Block) error
+    // GetTransactionByID retrieves a transaction by id
+    GetTransactionByID(txID string) (*protos.Transaction, error)
+    // GetBlockChain returns an instance of the chain that starts at the given topmost block hash
+    GetBlockChain(topBlockHash string) (BlockChain, error)
+    // Prune prunes the blocks/transactions that satisfy the given policy
+    Prune(policy PrunePolicy) error
+}
+
+// BlockChain represents an instance of a block chain. In the case of a consensus algorithm that could cause a fork, an instance of BlockChain
+// represents one of the forks (i.e., one of the chains starting from the genesis block to one of the topmost blocks)
+type BlockChain interface {
+    GetTopBlockHash() string
+    GetBlockchainInfo() (*protos.BlockchainInfo, error)
+    GetBlockHeaders(startingBlockHash, endingBlockHash string) []*BlockHeader
+    GetBlocks(startingBlockHash, endingBlockHash string) []*protos.Block
+    GetBlockByNumber(blockNumber uint64) *protos.Block
+    GetBlocksByNumber(startingBlockNumber, endingBlockNumber uint64) []*protos.Block
+    GetBlockchainSize() uint64
+    VerifyChain(highBlock, lowBlock uint64) (uint64, error)
+}
+```
+
+# Engine-specific thoughts
+
+
+### Point-in-Time Queries
+In abstract temporal terms, there are three varieties of query important to chaincode and application developers:
+
+1. Retrieve the most recent value of a key. (type: current; ex. How much money is in Alice's account?)
+2. Retrieve the value of a key at a specific time. (type: historical; ex. What was Alice's account balance at the end of last month?)
+3. Retrieve all values of a key over time. (type: lineage; ex. Produce a statement listing all of Alice's transactions.)
+
+When formulating a query, a developer will benefit from the ability to filter, project, and relate transactions to one another. Consider the following examples:
+
+1. Simple Filtering: Find all accounts that fell below a balance of $100 in the last month.
+2. Complex Filtering: Find all of Trudy's transactions that occurred in Iraq or Syria where the amount is above a threshold and the other party has a name that matches a regular expression.
+3. Relating: Determine if Alice has ever bought from the same gas station more than once in the same day. Feed this information into a fraud detection model.
+4. Projection: Retrieve the city, state, country, and amount of Alice's last ten transactions. This information will be fed into a risk/fraud detection model.
+
+
+### Query Language
+Developing a query language to support such a diverse range of queries will not be simple. The challenges are:
+
+1. Scaling the query language with developers as their needs grow. To date, the requests from developers have been modest. As the Hyperledger project's user base grows, so will the query complexity.
+2. There are two nearly disjoint classes of query:
+    1. Find a single value matching a set of constraints. Amenable to existing SQL and NoSQL grammars.
+    2. Find a chain or chains of transactions satisfying a set of constraints. Amenable to graph query languages, such as Neo4J's Cypher or SPARQL.
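+
+To make the discussion above concrete, the sketch below shows how the three query varieties might map onto the proposed QueryExecutor and ResultsIterator interfaces. It is illustrative only and not part of the proposed API: the function, chaincode, and key names are hypothetical, error handling is abbreviated, and the query-string grammar accepted by ExecuteQuery is deliberately left unspecified.
+
+```
+// Illustrative sketch only - not part of the proposal. Assumes some concrete
+// implementation of ValidatedLedger is available to the caller.
+func exampleQueries(ledger ValidatedLedger, chaincodeID string) error {
+    // Query against the most recent fork tip (assumes at least one fork exists).
+    tip := ledger.GetTopBlockHashes()[0]
+    qe, err := ledger.NewQueryExecuter(chaincodeID, tip)
+    if err != nil {
+        return err
+    }
+
+    // 1. Current: the latest value of a key.
+    balance, err := qe.GetState("alice")
+    if err != nil {
+        return err
+    }
+    _ = balance
+
+    // 3. Lineage: every transaction that has touched the key.
+    itr, err := qe.GetTransactionsForKey("alice")
+    if err != nil {
+        return err
+    }
+    defer itr.Close()
+    for itr.Next() {
+        _ = itr.GetResult() // one historical transaction per result
+    }
+
+    // 2. Historical and richer queries (filtering, projection, relating transactions)
+    // would go through ExecuteQuery with an engine-specific query string.
+    return nil
+}
+```
+
+An engine that only implements the KV data model could serve the 'current' and 'lineage' varieties directly through the Get* methods above, while the 'historical' variety and the filtering/projection examples would rely on ExecuteQuery and the query language discussed here.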
diff --git a/proposals/r1/Release-Process.md b/proposals/r1/Release-Process.md
new file mode 100644
index 00000000000..ffa63b1cbfb
--- /dev/null
+++ b/proposals/r1/Release-Process.md
@@ -0,0 +1,55 @@
+##### Procedure to cut a Fabric release:
+
+1. Create a release branch, and name it vM.m\[.p\]\[-qualifier\] where _M_ is the ```major``` release identifier, _m_ is the ```minor``` release identifier, _p_ is the patch level identifier (only for LTS releases) and _-qualifier_ is from the taxonomy of release maturity labels \[TBD\].
+1. Tag the master branch with the same name as the release branch.
+1. Publish release notes.
+1. Announce the release, linking to the release notes, on the Hyperledger ```fabric``` and ```general``` slack channels and the ```hyperledger-fabric``` and ```hyperledger-announce``` mailing lists.
+
+##### Perform if necessary:
+
+1. Select [issues](https://jira.hyperledger.org) to fix for the release branch and label them with the release identifier.
+1. Submit pull requests for the selected issues against the release branch.
+1. Decide whether to merge the changes from the release branch into the master branch, or to request that the authors submit the changes on both branches.
+1. Maintainers resolve any merge conflicts.
+
+
+___
+##### Release Taxonomy:
+
+##### Developer Preview:
+
+Not feature complete, but most functionality implemented, and any missing functionality that is committed for the production release is identified and there are specific people working on those features.  Not heavily performance tuned, but performance testing has started, and the first few hotspots identified, perhaps even addressed.  No "highest priority" issues in an unclosed state.  First-level developer documentation provided to help new developers up the learning curve.
+
+Naming convention: 0.x, where x is < 8 (Developer Preview is not used after it hits 1.0)
+
+##### Alpha:
+
+Feature complete, for all features committed to the production release.  Ready for Proof of Concept-level deployments.  Performance can be characterized in a predictable way, so that basic PoCs can be done without risk of embarrassing performance.  APIs documented.  First attempts at end-user documentation, developer docs further advanced.  No "highest priority" issues in an unclosed state.
+
+Naming convention: y.8.x, where y is 0, 1, 2 (basically the last production release) and x is the iteration of the alpha release.
+
+##### Beta:
+
+Feature complete for all features committed to the production release, perhaps with optional features when safe to add.  Ready for Pilot-level engagements.  Performance is very well characterized and active optimizations are being done, with a target set for the Production release.  No "highest priority" or "high priority" bugs unclosed.  Dev docs complete; end-user docs mostly done.
+
+Naming convention: y.9.x, where y is 0, 1, 2 (basically the last production release) and x is the iteration of the beta release.
+
+##### Production:
+
+An exact copy of the last iteration in the beta cycle, held for a reasonable length of time (2 weeks?) during which no new highest or high priority bugs are discovered, where end-user docs are considered complete, and performance/speed goals have been met.  Ready for Production-level engagements.
+
+Naming convention: y.z.x, where y is the production release level (1, 2, 3, etc), z is the branch level (for minor improvements), and x is the iteration of the current branch.
+
+
+##### Example:
+
+Today's Fabric "Developer Preview" release is 0.5.  It could continue to 0.6, 0.7, 0.7.1....
+but when it's ready for Alpha, we call it 0.8, iterating to 0.8.1, 0.8.2, etc.
+When it's ready for Beta, 0.9, 0.9.1, 0.9.2, etc.
+And when we are ready for production, 1.0.0.
+A bugfix patch release or other small fixups: 1.0.1, 1.0.2, etc.
+A branch would be called for when there is something small that changes behavior in some visible way: 1.1.0, 1.1.1, 1.2.0, 1.2.1, etc.
+And when it's time for a significant refactoring or rewrite or major new functionality, we go back into the cycle, but skipping the "Developer Preview":
+1.8.0 is the alpha for 2.0
+1.9.0 is the beta for 2.0
+2.0.0 is the production release.
diff --git a/proposals/r1/System-Chaincode-Specification.md b/proposals/r1/System-Chaincode-Specification.md
new file mode 100644
index 00000000000..0d4d62bbc77
--- /dev/null
+++ b/proposals/r1/System-Chaincode-Specification.md
@@ -0,0 +1,114 @@
+
+System Chaincode
+================
+
+Introduction
+------------
+
+User-written chaincode runs in a container (called "user chaincode" in
+this doc) and communicates with the peer over the network. These
+chaincodes have restrictions with regard to what code they can execute.
+For example, they can only interact with the peer via the \`ChaincodeStub\`
+interface, using methods such as GetState or PutState. There is a need for
+chaincode that has these restrictions relaxed. Such chaincode is broadly
+termed "System Chaincode".
+
+The purpose of system chaincode is to enable customization of certain
+behaviors of the fabric, such as a timer service or a naming service.
+
+The simplest way to design a system chaincode is to run it in the same
+process as the Peer. This document describes that design.
+
+In-process chaincode
+--------------------
+
+The approach is to consider the changes and generalizations needed to
+run chaincode in the same process as the hosting peer.
+
+Requirements
+
+- coexist with user chaincode
+- adhere to the same lifecycle as user chaincode - except for possibly
+  "deploy"
+- easy to install
+
+The above requirements impose the following design principles:
+
+- design consistent with user chaincode
+- uniform treatment of both types of chaincode in the codebase
+
+Though the impetus for the in-process chaincode design comes from
+"system chaincode", in principle the general "in-process chaincode"
+design would allow any chaincode to be deployed.
+
+Key considerations for a chaincode environment
+----------------------------------------------
+
+There are two main considerations for any chaincode environment
+(container, in-process, etc.):
+
+- Runtime Considerations
+- Lifecycle Considerations
+
+### Runtime Considerations
+
+Runtime considerations can be broadly classified into Transport and
+Execution mechanisms. The abstractions provided by the Fabric for these
+mechanisms are key to the ability to extend the system to support new
+chaincode runtimes consistent with existing ones in a fairly
+straightforward manner.
+
+### Transport mechanism
+
+The peer and the chaincode communicate using ChaincodeMessages.
+
+- the protocol is completely encapsulated via ChaincodeMessage
+- current gRPC streaming maps nicely to Go channels
+
+The above allows the chaincode transport to be isolated in a small
+section of code, leaving most of the code common to both
+environments. This is the key to the uniform treatment of runtime
+environments.
+
+### Execution mechanism
+
+The fabric provides a basic interface for a “vm”.
+The Docker container
+is implemented on top of this abstraction. The “vm” ([vm interface](https://github.com/hyperledger/fabric/blob/master/core/container/controller.go))
+can be used to provide an execution environment for the in-process
+runtime as well. This has the obvious benefit of encapsulating all
+execution access (deploy, launch, stop, etc.) in a manner transparent to the
+implementation.
+
+Lifecycle Considerations
+------------------------
+
+| Life cycle | Container model | In-process model |
+|------------------------|------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Deploy | User deploys with an external request. Participates in consensus | Two possibilities +++ (1) comes up automatically with the peer, does not participate in consensus; (2) user starts the chaincode with a request, participates in consensus |
+| Invoke/query | User sends request | User sends request |
+| Stop (not implemented) | User sends request | User sends request |
+| Upgrade \*\*\* (not implemented) | | |
++++ System chaincodes that are part of the genesis block cannot/should not
+be part of consensus, e.g., because of bootstrapping requirements that would
+prevent them from taking part in consensus. That is not true for
+upgrading those chaincodes.
+
+So this may then be the model - “system chaincodes that are in the genesis
+block will not be deployed via consensus. All other chaincode deployments
+and all chaincode upgrades (including system chaincode) will be part of
+consensus.”
+
+\*\*\* Upgrade is mentioned here just for completeness. It is an
+important part of the life cycle that should be a separate discussion unto
+itself, involving devops considerations such as versioning, staging,
+staggering, etc.
+
+Deploying System Chaincodes
+---------------------------
+
+System chaincodes come up with the system. System chaincodes will be
+defined in core.yaml. Minimally, a chaincode definition would contain
+the following information: { Name, Path }. Each chaincode definition
+could also specify dependent chaincodes, such that a chaincode will be
+brought up only if its dependent chaincodes are. The chaincodes would be
+brought up in the order specified in the list.
diff --git a/proposals/r1/blocks-2.png b/proposals/r1/blocks-2.png new file mode 100644 index 00000000000..7fd8b11c4fe Binary files /dev/null and b/proposals/r1/blocks-2.png differ diff --git a/proposals/r1/flow-2.png b/proposals/r1/flow-2.png new file mode 100644 index 00000000000..04380e4652e Binary files /dev/null and b/proposals/r1/flow-2.png differ diff --git a/protos/api.pb.go b/protos/api.pb.go index 6fdedbf38a2..30a8ff71d3e 100644 --- a/protos/api.pb.go +++ b/protos/api.pb.go @@ -84,7 +84,7 @@ package protos import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf1 "google/protobuf" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" import ( context "golang.org/x/net/context" @@ -96,28 +96,45 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + // Specifies the block number to be returned from the blockchain. type BlockNumber struct { Number uint64 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` } -func (m *BlockNumber) Reset() { *m = BlockNumber{} } -func (m *BlockNumber) String() string { return proto.CompactTextString(m) } -func (*BlockNumber) ProtoMessage() {} +func (m *BlockNumber) Reset() { *m = BlockNumber{} } +func (m *BlockNumber) String() string { return proto.CompactTextString(m) } +func (*BlockNumber) ProtoMessage() {} +func (*BlockNumber) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // Specifies the current number of blocks in the blockchain. type BlockCount struct { Count uint64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` } -func (m *BlockCount) Reset() { *m = BlockCount{} } -func (m *BlockCount) String() string { return proto.CompactTextString(m) } -func (*BlockCount) ProtoMessage() {} +func (m *BlockCount) Reset() { *m = BlockCount{} } +func (m *BlockCount) String() string { return proto.CompactTextString(m) } +func (*BlockCount) ProtoMessage() {} +func (*BlockCount) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func init() { + proto.RegisterType((*BlockNumber)(nil), "protos.BlockNumber") + proto.RegisterType((*BlockCount)(nil), "protos.BlockCount") +} // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + // Client API for Openchain service type OpenchainClient interface { @@ -200,52 +217,76 @@ func RegisterOpenchainServer(s *grpc.Server, srv OpenchainServer) { s.RegisterService(&_Openchain_serviceDesc, srv) } -func _Openchain_GetBlockchainInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Openchain_GetBlockchainInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(google_protobuf1.Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(OpenchainServer).GetBlockchainInfo(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(OpenchainServer).GetBlockchainInfo(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Openchain/GetBlockchainInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenchainServer).GetBlockchainInfo(ctx, req.(*google_protobuf1.Empty)) + } + return interceptor(ctx, in, info, handler) } -func _Openchain_GetBlockByNumber_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Openchain_GetBlockByNumber_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(BlockNumber) if err := dec(in); err != nil { return nil, err } - out, err := srv.(OpenchainServer).GetBlockByNumber(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(OpenchainServer).GetBlockByNumber(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Openchain/GetBlockByNumber", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(OpenchainServer).GetBlockByNumber(ctx, req.(*BlockNumber)) + } + return interceptor(ctx, in, info, handler) } -func _Openchain_GetBlockCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Openchain_GetBlockCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(google_protobuf1.Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(OpenchainServer).GetBlockCount(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(OpenchainServer).GetBlockCount(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Openchain/GetBlockCount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenchainServer).GetBlockCount(ctx, req.(*google_protobuf1.Empty)) + } + return interceptor(ctx, in, info, handler) } -func _Openchain_GetPeers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Openchain_GetPeers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(google_protobuf1.Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(OpenchainServer).GetPeers(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(OpenchainServer).GetPeers(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Openchain/GetPeers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenchainServer).GetPeers(ctx, req.(*google_protobuf1.Empty)) + } + return interceptor(ctx, in, info, handler) } var _Openchain_serviceDesc = grpc.ServiceDesc{ @@ -269,5 +310,28 @@ var _Openchain_serviceDesc = grpc.ServiceDesc{ Handler: _Openchain_GetPeers_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("api.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 244 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0x2c, 0xc8, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x53, 0xc5, 0x52, 0x3c, 0x69, 0x89, 0x49, 0x45, + 0x99, 0xc9, 0x10, 0x51, 0x29, 0xe9, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0x7d, 0x30, 0x2f, 0xa9, + 0x34, 0x4d, 0x3f, 0x35, 0xb7, 0xa0, 0xa4, 0x12, 0x22, 0xa9, 0xa4, 0xca, 0xc5, 0xed, 0x94, 0x93, + 0x9f, 0x9c, 0xed, 0x57, 0x9a, 0x9b, 0x94, 0x5a, 0x24, 0x24, 0xc6, 0xc5, 0x96, 0x07, 0x66, 0x49, + 0x30, 0x2a, 0x30, 0x6a, 0xb0, 0x04, 0x41, 0x79, 0x4a, 0x4a, 0x5c, 0x5c, 0x60, 0x65, 0xce, 0xf9, + 0xa5, 0x79, 0x25, 0x42, 0x22, 0x5c, 0xac, 0xc9, 0x20, 0x06, 0x54, 0x11, 0x84, 0x63, 0xd4, 0xce, + 0xc4, 0xc5, 0xe9, 0x5f, 0x90, 0x9a, 0x97, 0x9c, 0x91, 0x98, 0x99, 0x27, 0xe4, 0xca, 0x25, 0xe8, + 0x9e, 0x5a, 0x02, 0xd6, 0x04, 0x16, 0xf0, 0xcc, 0x4b, 0xcb, 0x17, 0x12, 0xd3, 0x83, 0xb8, 0x45, + 0x0f, 0xe6, 0x16, 0x3d, 0x57, 0x90, 0x5b, 0xa4, 0xc4, 0x20, 0x02, 0xc5, 0x7a, 0xa8, 0xea, 0x95, + 0x18, 0x84, 0x2c, 0xb8, 0x04, 0x60, 0xc6, 0x38, 0x55, 0x42, 0x1d, 0x29, 0x8c, 0xa2, 0x1a, 0x22, + 0x28, 0xc5, 0x8b, 0x22, 0x08, 0xd4, 0x69, 0xcb, 0xc5, 0x0b, 0xd3, 0x09, 0x71, 0x35, 0x2e, 0xcb, + 0x85, 0x50, 0x74, 0x82, 0xd5, 0x02, 0xb5, 0x5b, 0x71, 0x71, 
0x00, 0xb5, 0x07, 0xa4, 0xa6, 0x16, + 0x15, 0xe3, 0xd4, 0x29, 0x02, 0xd3, 0x09, 0x56, 0xe6, 0x9b, 0x5a, 0x5c, 0x9c, 0x98, 0x9e, 0xaa, + 0xc4, 0x90, 0x04, 0x89, 0x07, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x41, 0xce, 0xa8, 0xc9, + 0x9b, 0x01, 0x00, 0x00, } diff --git a/protos/chaincode.pb.go b/protos/chaincode.pb.go index e8a1cf1c1cc..081ef80bd1a 100644 --- a/protos/chaincode.pb.go +++ b/protos/chaincode.pb.go @@ -7,7 +7,7 @@ package protos import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "google/protobuf" +import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" import ( context "golang.org/x/net/context" @@ -39,6 +39,7 @@ var ConfidentialityLevel_value = map[string]int32{ func (x ConfidentialityLevel) String() string { return proto.EnumName(ConfidentialityLevel_name, int32(x)) } +func (ConfidentialityLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } type ChaincodeSpec_Type int32 @@ -68,6 +69,7 @@ var ChaincodeSpec_Type_value = map[string]int32{ func (x ChaincodeSpec_Type) String() string { return proto.EnumName(ChaincodeSpec_Type_name, int32(x)) } +func (ChaincodeSpec_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{2, 0} } type ChaincodeDeploymentSpec_ExecutionEnvironment int32 @@ -88,6 +90,9 @@ var ChaincodeDeploymentSpec_ExecutionEnvironment_value = map[string]int32{ func (x ChaincodeDeploymentSpec_ExecutionEnvironment) String() string { return proto.EnumName(ChaincodeDeploymentSpec_ExecutionEnvironment_name, int32(x)) } +func (ChaincodeDeploymentSpec_ExecutionEnvironment) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{3, 0} +} type ChaincodeMessage_Type int32 @@ -165,6 +170,7 @@ var ChaincodeMessage_Type_value = map[string]int32{ func (x ChaincodeMessage_Type) String() string { return proto.EnumName(ChaincodeMessage_Type_name, int32(x)) } +func (ChaincodeMessage_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{6, 0} } // ChaincodeID contains the path as specified by the deploy transaction // that created it as well as the hashCode that is generated by the @@ -181,18 +187,22 @@ type ChaincodeID struct { Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` } -func (m *ChaincodeID) Reset() { *m = ChaincodeID{} } -func (m *ChaincodeID) String() string { return proto.CompactTextString(m) } -func (*ChaincodeID) ProtoMessage() {} +func (m *ChaincodeID) Reset() { *m = ChaincodeID{} } +func (m *ChaincodeID) String() string { return proto.CompactTextString(m) } +func (*ChaincodeID) ProtoMessage() {} +func (*ChaincodeID) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } // Carries the chaincode function and its arguments. +// UnmarshalJSON in transaction.go converts the string-based REST/JSON input to +// the []byte-based current ChaincodeInput structure. type ChaincodeInput struct { Args [][]byte `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` } -func (m *ChaincodeInput) Reset() { *m = ChaincodeInput{} } -func (m *ChaincodeInput) String() string { return proto.CompactTextString(m) } -func (*ChaincodeInput) ProtoMessage() {} +func (m *ChaincodeInput) Reset() { *m = ChaincodeInput{} } +func (m *ChaincodeInput) String() string { return proto.CompactTextString(m) } +func (*ChaincodeInput) ProtoMessage() {} +func (*ChaincodeInput) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } // Carries the chaincode specification. 
This is the actual metadata required for // defining a chaincode. @@ -207,9 +217,10 @@ type ChaincodeSpec struct { Attributes []string `protobuf:"bytes,8,rep,name=attributes" json:"attributes,omitempty"` } -func (m *ChaincodeSpec) Reset() { *m = ChaincodeSpec{} } -func (m *ChaincodeSpec) String() string { return proto.CompactTextString(m) } -func (*ChaincodeSpec) ProtoMessage() {} +func (m *ChaincodeSpec) Reset() { *m = ChaincodeSpec{} } +func (m *ChaincodeSpec) String() string { return proto.CompactTextString(m) } +func (*ChaincodeSpec) ProtoMessage() {} +func (*ChaincodeSpec) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } func (m *ChaincodeSpec) GetChaincodeID() *ChaincodeID { if m != nil { @@ -235,9 +246,10 @@ type ChaincodeDeploymentSpec struct { ExecEnv ChaincodeDeploymentSpec_ExecutionEnvironment `protobuf:"varint,4,opt,name=execEnv,enum=protos.ChaincodeDeploymentSpec_ExecutionEnvironment" json:"execEnv,omitempty"` } -func (m *ChaincodeDeploymentSpec) Reset() { *m = ChaincodeDeploymentSpec{} } -func (m *ChaincodeDeploymentSpec) String() string { return proto.CompactTextString(m) } -func (*ChaincodeDeploymentSpec) ProtoMessage() {} +func (m *ChaincodeDeploymentSpec) Reset() { *m = ChaincodeDeploymentSpec{} } +func (m *ChaincodeDeploymentSpec) String() string { return proto.CompactTextString(m) } +func (*ChaincodeDeploymentSpec) ProtoMessage() {} +func (*ChaincodeDeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } func (m *ChaincodeDeploymentSpec) GetChaincodeSpec() *ChaincodeSpec { if m != nil { @@ -266,9 +278,10 @@ type ChaincodeInvocationSpec struct { IdGenerationAlg string `protobuf:"bytes,2,opt,name=idGenerationAlg" json:"idGenerationAlg,omitempty"` } -func (m *ChaincodeInvocationSpec) Reset() { *m = ChaincodeInvocationSpec{} } -func (m *ChaincodeInvocationSpec) String() string { return proto.CompactTextString(m) } -func (*ChaincodeInvocationSpec) ProtoMessage() {} +func (m *ChaincodeInvocationSpec) Reset() { *m = ChaincodeInvocationSpec{} } +func (m *ChaincodeInvocationSpec) String() string { return proto.CompactTextString(m) } +func (*ChaincodeInvocationSpec) ProtoMessage() {} +func (*ChaincodeInvocationSpec) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } func (m *ChaincodeInvocationSpec) GetChaincodeSpec() *ChaincodeSpec { if m != nil { @@ -291,9 +304,10 @@ type ChaincodeSecurityContext struct { TxTimestamp *google_protobuf.Timestamp `protobuf:"bytes,7,opt,name=txTimestamp" json:"txTimestamp,omitempty"` } -func (m *ChaincodeSecurityContext) Reset() { *m = ChaincodeSecurityContext{} } -func (m *ChaincodeSecurityContext) String() string { return proto.CompactTextString(m) } -func (*ChaincodeSecurityContext) ProtoMessage() {} +func (m *ChaincodeSecurityContext) Reset() { *m = ChaincodeSecurityContext{} } +func (m *ChaincodeSecurityContext) String() string { return proto.CompactTextString(m) } +func (*ChaincodeSecurityContext) ProtoMessage() {} +func (*ChaincodeSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } func (m *ChaincodeSecurityContext) GetTxTimestamp() *google_protobuf.Timestamp { if m != nil { @@ -314,9 +328,10 @@ type ChaincodeMessage struct { ChaincodeEvent *ChaincodeEvent `protobuf:"bytes,6,opt,name=chaincodeEvent" json:"chaincodeEvent,omitempty"` } -func (m *ChaincodeMessage) Reset() { *m = ChaincodeMessage{} } -func (m *ChaincodeMessage) String() string { return proto.CompactTextString(m) } -func (*ChaincodeMessage) ProtoMessage() {} +func (m *ChaincodeMessage) 
Reset() { *m = ChaincodeMessage{} } +func (m *ChaincodeMessage) String() string { return proto.CompactTextString(m) } +func (*ChaincodeMessage) ProtoMessage() {} +func (*ChaincodeMessage) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } func (m *ChaincodeMessage) GetTimestamp() *google_protobuf.Timestamp { if m != nil { @@ -344,53 +359,59 @@ type PutStateInfo struct { Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *PutStateInfo) Reset() { *m = PutStateInfo{} } -func (m *PutStateInfo) String() string { return proto.CompactTextString(m) } -func (*PutStateInfo) ProtoMessage() {} +func (m *PutStateInfo) Reset() { *m = PutStateInfo{} } +func (m *PutStateInfo) String() string { return proto.CompactTextString(m) } +func (*PutStateInfo) ProtoMessage() {} +func (*PutStateInfo) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } type RangeQueryState struct { StartKey string `protobuf:"bytes,1,opt,name=startKey" json:"startKey,omitempty"` EndKey string `protobuf:"bytes,2,opt,name=endKey" json:"endKey,omitempty"` } -func (m *RangeQueryState) Reset() { *m = RangeQueryState{} } -func (m *RangeQueryState) String() string { return proto.CompactTextString(m) } -func (*RangeQueryState) ProtoMessage() {} +func (m *RangeQueryState) Reset() { *m = RangeQueryState{} } +func (m *RangeQueryState) String() string { return proto.CompactTextString(m) } +func (*RangeQueryState) ProtoMessage() {} +func (*RangeQueryState) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } type RangeQueryStateNext struct { - ID string `protobuf:"bytes,1,opt,name=ID" json:"ID,omitempty"` + ID string `protobuf:"bytes,1,opt,name=ID,json=iD" json:"ID,omitempty"` } -func (m *RangeQueryStateNext) Reset() { *m = RangeQueryStateNext{} } -func (m *RangeQueryStateNext) String() string { return proto.CompactTextString(m) } -func (*RangeQueryStateNext) ProtoMessage() {} +func (m *RangeQueryStateNext) Reset() { *m = RangeQueryStateNext{} } +func (m *RangeQueryStateNext) String() string { return proto.CompactTextString(m) } +func (*RangeQueryStateNext) ProtoMessage() {} +func (*RangeQueryStateNext) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{9} } type RangeQueryStateClose struct { - ID string `protobuf:"bytes,1,opt,name=ID" json:"ID,omitempty"` + ID string `protobuf:"bytes,1,opt,name=ID,json=iD" json:"ID,omitempty"` } -func (m *RangeQueryStateClose) Reset() { *m = RangeQueryStateClose{} } -func (m *RangeQueryStateClose) String() string { return proto.CompactTextString(m) } -func (*RangeQueryStateClose) ProtoMessage() {} +func (m *RangeQueryStateClose) Reset() { *m = RangeQueryStateClose{} } +func (m *RangeQueryStateClose) String() string { return proto.CompactTextString(m) } +func (*RangeQueryStateClose) ProtoMessage() {} +func (*RangeQueryStateClose) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{10} } type RangeQueryStateKeyValue struct { Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *RangeQueryStateKeyValue) Reset() { *m = RangeQueryStateKeyValue{} } -func (m *RangeQueryStateKeyValue) String() string { return proto.CompactTextString(m) } -func (*RangeQueryStateKeyValue) ProtoMessage() {} +func (m *RangeQueryStateKeyValue) Reset() { *m = RangeQueryStateKeyValue{} } +func (m *RangeQueryStateKeyValue) String() string { return proto.CompactTextString(m) } +func (*RangeQueryStateKeyValue) ProtoMessage() {} +func 
(*RangeQueryStateKeyValue) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{11} } type RangeQueryStateResponse struct { KeysAndValues []*RangeQueryStateKeyValue `protobuf:"bytes,1,rep,name=keysAndValues" json:"keysAndValues,omitempty"` HasMore bool `protobuf:"varint,2,opt,name=hasMore" json:"hasMore,omitempty"` - ID string `protobuf:"bytes,3,opt,name=ID" json:"ID,omitempty"` + ID string `protobuf:"bytes,3,opt,name=ID,json=iD" json:"ID,omitempty"` } -func (m *RangeQueryStateResponse) Reset() { *m = RangeQueryStateResponse{} } -func (m *RangeQueryStateResponse) String() string { return proto.CompactTextString(m) } -func (*RangeQueryStateResponse) ProtoMessage() {} +func (m *RangeQueryStateResponse) Reset() { *m = RangeQueryStateResponse{} } +func (m *RangeQueryStateResponse) String() string { return proto.CompactTextString(m) } +func (*RangeQueryStateResponse) ProtoMessage() {} +func (*RangeQueryStateResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{12} } func (m *RangeQueryStateResponse) GetKeysAndValues() []*RangeQueryStateKeyValue { if m != nil { @@ -400,6 +421,19 @@ func (m *RangeQueryStateResponse) GetKeysAndValues() []*RangeQueryStateKeyValue } func init() { + proto.RegisterType((*ChaincodeID)(nil), "protos.ChaincodeID") + proto.RegisterType((*ChaincodeInput)(nil), "protos.ChaincodeInput") + proto.RegisterType((*ChaincodeSpec)(nil), "protos.ChaincodeSpec") + proto.RegisterType((*ChaincodeDeploymentSpec)(nil), "protos.ChaincodeDeploymentSpec") + proto.RegisterType((*ChaincodeInvocationSpec)(nil), "protos.ChaincodeInvocationSpec") + proto.RegisterType((*ChaincodeSecurityContext)(nil), "protos.ChaincodeSecurityContext") + proto.RegisterType((*ChaincodeMessage)(nil), "protos.ChaincodeMessage") + proto.RegisterType((*PutStateInfo)(nil), "protos.PutStateInfo") + proto.RegisterType((*RangeQueryState)(nil), "protos.RangeQueryState") + proto.RegisterType((*RangeQueryStateNext)(nil), "protos.RangeQueryStateNext") + proto.RegisterType((*RangeQueryStateClose)(nil), "protos.RangeQueryStateClose") + proto.RegisterType((*RangeQueryStateKeyValue)(nil), "protos.RangeQueryStateKeyValue") + proto.RegisterType((*RangeQueryStateResponse)(nil), "protos.RangeQueryStateResponse") proto.RegisterEnum("protos.ConfidentialityLevel", ConfidentialityLevel_name, ConfidentialityLevel_value) proto.RegisterEnum("protos.ChaincodeSpec_Type", ChaincodeSpec_Type_name, ChaincodeSpec_Type_value) proto.RegisterEnum("protos.ChaincodeDeploymentSpec_ExecutionEnvironment", ChaincodeDeploymentSpec_ExecutionEnvironment_name, ChaincodeDeploymentSpec_ExecutionEnvironment_value) @@ -410,6 +444,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion3 + // Client API for ChaincodeSupport service type ChaincodeSupportClient interface { @@ -503,4 +541,85 @@ var _ChaincodeSupport_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, + Metadata: fileDescriptor2, +} + +func init() { proto.RegisterFile("chaincode.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 1173 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x56, 0x4b, 0x6f, 0xdb, 0x46, + 0x10, 0x8e, 0x5e, 0x7e, 0x8c, 0x5e, 0xcc, 0x5a, 0x71, 0x04, 0xf7, 0x91, 0x80, 0x48, 0x03, 0xa3, + 0x07, 0x25, 0x55, 0xd3, 0xa2, 0x40, 0x8b, 0xa0, 0x0c, 0xb9, 0x71, 0x19, 0xcb, 0x94, 0xb2, 0xa2, + 0x8d, 0xe4, 0x64, 0xd0, 0xd4, 0x9a, 0x26, 0x22, 0x93, 0x04, 0xb9, 0x32, 0xec, 0x5b, 0xcf, 0x3d, + 0xf5, 0x37, 0xf4, 0x47, 0xf4, 0xd0, 0x1f, 0xd4, 0xdf, 0xd1, 0xdd, 0xe5, 0x43, 0xcf, 0x04, 0x01, + 0x7a, 0xd2, 0xce, 0xcc, 0x37, 0xb3, 0xb3, 0xdf, 0x7e, 0x3b, 0x14, 0xb4, 0xdd, 0x2b, 0xc7, 0x0f, + 0xdc, 0x70, 0x42, 0x7b, 0x51, 0x1c, 0xb2, 0x10, 0x6d, 0xc9, 0x9f, 0xe4, 0xa0, 0x53, 0x04, 0xe8, + 0x0d, 0x0d, 0x58, 0x1a, 0x3d, 0x78, 0xe4, 0x85, 0xa1, 0x37, 0xa5, 0xcf, 0xa4, 0x75, 0x31, 0xbb, + 0x7c, 0xc6, 0xfc, 0x6b, 0x9a, 0x30, 0xe7, 0x3a, 0x4a, 0x01, 0xea, 0x0f, 0x50, 0xd7, 0xf3, 0x44, + 0xd3, 0x40, 0x08, 0xaa, 0x91, 0xc3, 0xae, 0xba, 0xa5, 0xc7, 0xa5, 0xc3, 0x5d, 0x22, 0xd7, 0xc2, + 0x17, 0x38, 0xd7, 0xb4, 0x5b, 0x4e, 0x7d, 0x62, 0xad, 0x3e, 0x81, 0xd6, 0x3c, 0x2d, 0x88, 0x66, + 0x4c, 0xa0, 0x9c, 0xd8, 0x4b, 0x78, 0x66, 0xe5, 0xb0, 0x41, 0xe4, 0x5a, 0xfd, 0xbb, 0x02, 0xcd, + 0x02, 0x36, 0x8e, 0xa8, 0x8b, 0x7a, 0x50, 0x65, 0x77, 0x11, 0x95, 0xf5, 0x5b, 0xfd, 0x83, 0xb4, + 0x89, 0xa4, 0xb7, 0x04, 0xea, 0xd9, 0x1c, 0x41, 0x24, 0x0e, 0xf1, 0xf6, 0xdc, 0x79, 0x7b, 0xb2, + 0x85, 0x7a, 0x7f, 0x6f, 0x2d, 0xcd, 0x34, 0xc8, 0x22, 0x0e, 0x3d, 0x87, 0x6d, 0x97, 0x85, 0xf1, + 0x49, 0xe2, 0x75, 0x2b, 0x32, 0x65, 0x7f, 0x3d, 0x45, 0x74, 0x4d, 0x72, 0x18, 0xea, 0xc2, 0xb6, + 0xa0, 0x26, 0x9c, 0xb1, 0x6e, 0x95, 0x67, 0xd4, 0x48, 0x6e, 0xa2, 0x27, 0xd0, 0x4c, 0xa8, 0x3b, + 0x8b, 0xa9, 0x1e, 0x06, 0x8c, 0xde, 0xb2, 0x6e, 0x4d, 0xf2, 0xb0, 0xec, 0x44, 0x23, 0xe8, 0xb8, + 0x61, 0x70, 0xe9, 0x4f, 0x38, 0xf7, 0xbe, 0x33, 0xf5, 0xd9, 0xdd, 0x80, 0xdf, 0xc3, 0xb4, 0xbb, + 0x25, 0x0f, 0xfa, 0x65, 0xb1, 0xfd, 0x06, 0x0c, 0xd9, 0x98, 0x89, 0x0e, 0x60, 0xe7, 0x9a, 0x32, + 0x67, 0xe2, 0x30, 0xa7, 0xbb, 0xcd, 0xab, 0x34, 0x48, 0x61, 0xa3, 0xaf, 0x01, 0x1c, 0xc6, 0x62, + 0xff, 0x62, 0xc6, 0x68, 0xd2, 0xdd, 0xe1, 0x94, 0xef, 0x92, 0x05, 0x8f, 0xfa, 0x12, 0xaa, 0x82, + 0x44, 0xd4, 0x84, 0xdd, 0x53, 0xcb, 0xc0, 0xaf, 0x4d, 0x0b, 0x1b, 0xca, 0x3d, 0x04, 0xb0, 0x75, + 0x34, 0x1c, 0x68, 0xd6, 0x91, 0x52, 0x42, 0x3b, 0x50, 0xb5, 0x86, 0x06, 0x56, 0xca, 0x68, 0x1b, + 0x2a, 0xba, 0x46, 0x94, 0x8a, 0x70, 0xbd, 0xd1, 0xce, 0x34, 0xa5, 0xaa, 0xfe, 0x53, 0x86, 0x87, + 0x05, 0x53, 0x06, 0x8d, 0xa6, 0xe1, 0xdd, 0x35, 0x6f, 0x4f, 0x5e, 0xe1, 0xcf, 0xd0, 0x74, 0x17, + 0xaf, 0x4b, 0xde, 0x65, 0xbd, 0xff, 0x60, 0xe3, 0x5d, 0x92, 0x65, 0x2c, 0xfa, 0x15, 0x9a, 0xf4, + 0xf2, 0x92, 0xba, 0xcc, 0xbf, 0xa1, 0x86, 0xc3, 0x68, 0x76, 0xa3, 0x07, 0xbd, 0x54, 0xa7, 0xbd, + 0x5c, 0xa7, 0x3d, 0x3b, 0xd7, 0x29, 0x59, 0x4e, 0x40, 0x8f, 0xb9, 0x22, 0x78, 0xb5, 0x91, 0xe3, + 0x7e, 0x70, 0x3c, 0x2a, 0xaf, 0xb7, 0x41, 0x16, 0x5d, 0xc8, 0x82, 0x6d, 0x7a, 0x4b, 0x5d, 0x1c, + 0xdc, 0xc8, 0xab, 0x6c, 0xf5, 0x5f, 0xac, 0xb5, 0xb6, 0x7c, 0xa4, 0x1e, 0xe6, 0xf0, 0x19, 0xf3, + 0xc3, 0x80, 0xe7, 0xf8, 0x71, 0x18, 0x88, 0x00, 0xc9, 0x8b, 0xa8, 0x3d, 0xe8, 0x6c, 0x02, 0x08, + 
0x36, 0x8d, 0xa1, 0x7e, 0x8c, 0x49, 0xca, 0xec, 0xf8, 0xfd, 0xd8, 0xc6, 0x27, 0x4a, 0x49, 0xfd, + 0xbd, 0xb4, 0x40, 0x9e, 0x19, 0xdc, 0x84, 0xae, 0x23, 0x52, 0xff, 0x3f, 0x79, 0x87, 0xd0, 0xf6, + 0x27, 0x47, 0x34, 0xa0, 0xb1, 0x2c, 0xa8, 0x4d, 0xbd, 0xec, 0x4d, 0xae, 0xba, 0xd5, 0x3f, 0xcb, + 0xd0, 0x9d, 0x97, 0x12, 0x42, 0xe5, 0xb2, 0xca, 0xa5, 0xca, 0xc5, 0xe3, 0x3a, 0xd3, 0x29, 0x8d, + 0x75, 0x1a, 0x33, 0xd9, 0x40, 0x83, 0x2c, 0x78, 0xe6, 0xf1, 0xb1, 0xef, 0x05, 0x72, 0x87, 0x22, + 0x2e, 0x3c, 0xe2, 0xa9, 0x44, 0xce, 0xdd, 0x34, 0x74, 0x26, 0x19, 0xfb, 0xb9, 0x29, 0x22, 0x17, + 0x7e, 0x30, 0xf1, 0x03, 0x4f, 0x32, 0xcf, 0x23, 0x99, 0xb9, 0x24, 0xe6, 0xda, 0x8a, 0x98, 0x9f, + 0x42, 0x2b, 0x72, 0x62, 0xce, 0xe8, 0x49, 0x8e, 0xd8, 0x92, 0x88, 0x15, 0x2f, 0xfa, 0x05, 0xea, + 0xec, 0xb6, 0xd0, 0x85, 0x7c, 0x13, 0x9f, 0x56, 0xce, 0x22, 0x5c, 0xfd, 0xab, 0x06, 0x4a, 0x41, + 0xc9, 0x09, 0x4d, 0x12, 0x21, 0x95, 0xef, 0x96, 0xc6, 0xd1, 0x57, 0x6b, 0xb7, 0x90, 0xe1, 0x16, + 0x27, 0xd2, 0x4f, 0xb0, 0x5b, 0xcc, 0xd0, 0xcf, 0x50, 0xef, 0x1c, 0xfc, 0x09, 0xde, 0xf8, 0xec, + 0x64, 0xb7, 0xfe, 0x44, 0x92, 0xc6, 0x27, 0xac, 0x58, 0xa3, 0x37, 0xd0, 0x4e, 0x96, 0x2f, 0x4e, + 0x12, 0x57, 0xef, 0x3f, 0x5e, 0xd7, 0xca, 0x32, 0x8e, 0xac, 0x26, 0xa2, 0x97, 0xd0, 0x2a, 0x94, + 0x84, 0xc5, 0xd7, 0x41, 0x32, 0xbc, 0x69, 0x2a, 0xca, 0x28, 0x59, 0x41, 0xab, 0xff, 0x96, 0x37, + 0xcf, 0x93, 0x06, 0xec, 0x10, 0x7c, 0x64, 0x72, 0xdd, 0x13, 0x3e, 0x51, 0x5a, 0x00, 0xb9, 0xc5, + 0xa3, 0x65, 0x31, 0x4e, 0x4c, 0xcb, 0xb4, 0xf9, 0x60, 0xd9, 0x85, 0x1a, 0xc1, 0x9a, 0xf1, 0x5e, + 0xa9, 0xa2, 0x36, 0xd4, 0x6d, 0xa2, 0x59, 0x63, 0x4d, 0xb7, 0xcd, 0xa1, 0xa5, 0xd4, 0x44, 0x49, + 0x7d, 0x78, 0x32, 0x1a, 0x60, 0x9b, 0x27, 0x6d, 0x09, 0x28, 0x26, 0x64, 0x48, 0x94, 0x6d, 0x11, + 0x39, 0xc2, 0xf6, 0xf9, 0xd8, 0xd6, 0x6c, 0xac, 0xec, 0x08, 0x73, 0x74, 0x9a, 0x9b, 0xbb, 0xc2, + 0x34, 0xf0, 0x20, 0x33, 0x01, 0x75, 0x40, 0x31, 0xad, 0xb3, 0xe1, 0x31, 0x3e, 0xd7, 0x7f, 0xd3, + 0x4c, 0x4b, 0x17, 0xa3, 0xad, 0x8e, 0x14, 0x68, 0x64, 0xde, 0xb7, 0xa7, 0x98, 0xbc, 0x57, 0x1a, + 0x69, 0xcb, 0xe3, 0xd1, 0xd0, 0x1a, 0x63, 0xa5, 0x29, 0x76, 0x4b, 0x03, 0x2d, 0xb4, 0x07, 0x6d, + 0xb9, 0x3c, 0x9f, 0x77, 0xd3, 0x16, 0xdd, 0xa6, 0xce, 0xb4, 0x27, 0x05, 0x3d, 0x80, 0xfb, 0xbc, + 0xfb, 0xa3, 0xac, 0x5e, 0xb6, 0xfb, 0x7d, 0x2e, 0xef, 0xfd, 0x35, 0xf7, 0xb9, 0x85, 0xdf, 0xd9, + 0x0a, 0x42, 0x5f, 0xc0, 0xc3, 0xf5, 0x98, 0x3e, 0x18, 0xf2, 0x06, 0xf6, 0xc4, 0x29, 0x8e, 0x31, + 0x1e, 0x69, 0x03, 0xf3, 0x0c, 0x2b, 0x1d, 0xf5, 0x47, 0x68, 0x8c, 0x66, 0x6c, 0xcc, 0xf8, 0xa0, + 0x33, 0x83, 0xcb, 0x90, 0xf7, 0x5f, 0xf9, 0x40, 0xef, 0xb2, 0xaf, 0xb1, 0x58, 0xf2, 0x73, 0xd6, + 0x6e, 0x9c, 0xe9, 0x8c, 0x66, 0xef, 0x32, 0x35, 0x54, 0x0c, 0x6d, 0xe2, 0x04, 0x1e, 0x7d, 0x3b, + 0xa3, 0xf1, 0x9d, 0x4c, 0x17, 0x2f, 0x8e, 0xcb, 0x2e, 0x66, 0xc7, 0x45, 0x7e, 0x61, 0xa3, 0x7d, + 0xd8, 0xa2, 0xc1, 0x44, 0x44, 0xd2, 0xf9, 0x91, 0x59, 0xea, 0x37, 0xb0, 0xb7, 0x52, 0xc6, 0x12, + 0xf2, 0x69, 0x41, 0x99, 0x7f, 0x7b, 0xd3, 0x22, 0x65, 0xdf, 0x50, 0x9f, 0x42, 0x67, 0x05, 0xa6, + 0x4f, 0xc3, 0x84, 0xae, 0xe1, 0x34, 0x7e, 0xf2, 0x65, 0x1c, 0xdf, 0xe4, 0x4c, 0x34, 0xfc, 0xd9, + 0x07, 0xfb, 0xa3, 0xb4, 0x56, 0x83, 0xd0, 0x24, 0x0a, 0x03, 0xbe, 0x1d, 0x86, 0x26, 0x4f, 0x4c, + 0xb4, 0x60, 0x22, 0x6b, 0xa6, 0x7f, 0x3d, 0xea, 0xfd, 0x47, 0xb9, 0xa8, 0x3f, 0xb2, 0x37, 0x59, + 0xce, 0x12, 0xcf, 0xf2, 0xca, 0x49, 0x4e, 0xc2, 0x38, 0xdd, 0x7a, 0x87, 0xe4, 0x66, 0x76, 0x9e, + 0x4a, 0x7e, 0x9e, 0x6f, 0x5f, 0x40, 0x67, 0xd3, 0xf7, 0x5b, 0x0c, 0xff, 0xd1, 0xe9, 0xab, 0x81, + 0xa9, 0xf3, 0x27, 0xc1, 
0x15, 0xa7, 0x0f, 0xad, 0xd7, 0xa6, 0x81, 0x2d, 0xdb, 0xd4, 0x06, 0x4a, + 0xa9, 0xff, 0x6e, 0x61, 0xee, 0x8c, 0x67, 0x51, 0x14, 0xf2, 0x11, 0x6b, 0x70, 0x15, 0x52, 0xcf, + 0x4f, 0x18, 0x8d, 0x51, 0xf7, 0x63, 0x53, 0xe7, 0xe0, 0xa3, 0x11, 0xf5, 0xde, 0x61, 0xe9, 0x79, + 0xe9, 0x55, 0x17, 0xf6, 0xc3, 0xd8, 0xeb, 0x5d, 0xf1, 0x97, 0x19, 0x4f, 0xe9, 0xc4, 0xa3, 0x71, + 0x96, 0x70, 0x91, 0xfe, 0x29, 0xfc, 0xfe, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x55, 0xfc, + 0xa4, 0x2e, 0x0a, 0x00, 0x00, } diff --git a/protos/chaincode.proto b/protos/chaincode.proto index cc62c23a1dc..0711a88c289 100644 --- a/protos/chaincode.proto +++ b/protos/chaincode.proto @@ -46,6 +46,8 @@ message ChaincodeID { } // Carries the chaincode function and its arguments. +// UnmarshalJSON in transaction.go converts the string-based REST/JSON input to +// the []byte-based current ChaincodeInput structure. message ChaincodeInput { repeated bytes args = 1; } diff --git a/protos/chaincodeevent.pb.go b/protos/chaincodeevent.pb.go index f555bbf5af3..a14e6404b03 100644 --- a/protos/chaincodeevent.pb.go +++ b/protos/chaincodeevent.pb.go @@ -22,6 +22,27 @@ type ChaincodeEvent struct { Payload []byte `protobuf:"bytes,4,opt,name=payload,proto3" json:"payload,omitempty"` } -func (m *ChaincodeEvent) Reset() { *m = ChaincodeEvent{} } -func (m *ChaincodeEvent) String() string { return proto.CompactTextString(m) } -func (*ChaincodeEvent) ProtoMessage() {} +func (m *ChaincodeEvent) Reset() { *m = ChaincodeEvent{} } +func (m *ChaincodeEvent) String() string { return proto.CompactTextString(m) } +func (*ChaincodeEvent) ProtoMessage() {} +func (*ChaincodeEvent) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func init() { + proto.RegisterType((*ChaincodeEvent)(nil), "protos.ChaincodeEvent") +} + +func init() { proto.RegisterFile("chaincodeevent.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 151 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x49, 0xce, 0x48, 0xcc, + 0xcc, 0x4b, 0xce, 0x4f, 0x49, 0x4d, 0x2d, 0x4b, 0xcd, 0x2b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x62, 0x03, 0x53, 0xc5, 0x4a, 0x75, 0x5c, 0x7c, 0xce, 0x30, 0x79, 0x57, 0x90, 0xbc, 0x90, + 0x02, 0x17, 0x37, 0x5c, 0x87, 0xa7, 0x8b, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0xb2, 0x90, + 0x90, 0x10, 0x17, 0x4b, 0x49, 0x05, 0x50, 0x8a, 0x09, 0x2c, 0x05, 0x66, 0x0b, 0xc9, 0x70, 0x71, + 0x82, 0x8d, 0xf7, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x06, 0x4b, 0x20, 0x04, 0x84, 0x24, 0xb8, 0xd8, + 0x0b, 0x12, 0x2b, 0x73, 0xf2, 0x13, 0x53, 0x24, 0x58, 0x80, 0x72, 0x3c, 0x41, 0x30, 0xae, 0x93, + 0x04, 0x97, 0x58, 0x7e, 0x51, 0xba, 0x5e, 0x46, 0x65, 0x41, 0x6a, 0x51, 0x4e, 0x6a, 0x4a, 0x7a, + 0x6a, 0x11, 0xc4, 0x81, 0xc5, 0x49, 0x10, 0x17, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe7, + 0x84, 0xee, 0x6a, 0xc0, 0x00, 0x00, 0x00, +} diff --git a/protos/devops.pb.go b/protos/devops.pb.go index 6fae2a7e37d..b5e57309fea 100644 --- a/protos/devops.pb.go +++ b/protos/devops.pb.go @@ -40,6 +40,7 @@ var BuildResult_StatusCode_value = map[string]int32{ func (x BuildResult_StatusCode) String() string { return proto.EnumName(BuildResult_StatusCode_name, int32(x)) } +func (BuildResult_StatusCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{4, 0} } // Secret is a temporary object to establish security with the Devops. 
// A better solution using certificate will be introduced later @@ -48,9 +49,10 @@ type Secret struct { EnrollSecret string `protobuf:"bytes,2,opt,name=enrollSecret" json:"enrollSecret,omitempty"` } -func (m *Secret) Reset() { *m = Secret{} } -func (m *Secret) String() string { return proto.CompactTextString(m) } -func (*Secret) ProtoMessage() {} +func (m *Secret) Reset() { *m = Secret{} } +func (m *Secret) String() string { return proto.CompactTextString(m) } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } type SigmaInput struct { Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` @@ -58,9 +60,10 @@ type SigmaInput struct { Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` } -func (m *SigmaInput) Reset() { *m = SigmaInput{} } -func (m *SigmaInput) String() string { return proto.CompactTextString(m) } -func (*SigmaInput) ProtoMessage() {} +func (m *SigmaInput) Reset() { *m = SigmaInput{} } +func (m *SigmaInput) String() string { return proto.CompactTextString(m) } +func (*SigmaInput) ProtoMessage() {} +func (*SigmaInput) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } func (m *SigmaInput) GetSecret() *Secret { if m != nil { @@ -74,9 +77,10 @@ type ExecuteWithBinding struct { Binding []byte `protobuf:"bytes,2,opt,name=binding,proto3" json:"binding,omitempty"` } -func (m *ExecuteWithBinding) Reset() { *m = ExecuteWithBinding{} } -func (m *ExecuteWithBinding) String() string { return proto.CompactTextString(m) } -func (*ExecuteWithBinding) ProtoMessage() {} +func (m *ExecuteWithBinding) Reset() { *m = ExecuteWithBinding{} } +func (m *ExecuteWithBinding) String() string { return proto.CompactTextString(m) } +func (*ExecuteWithBinding) ProtoMessage() {} +func (*ExecuteWithBinding) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} } func (m *ExecuteWithBinding) GetChaincodeInvocationSpec() *ChaincodeInvocationSpec { if m != nil { @@ -91,9 +95,10 @@ type SigmaOutput struct { Asn1Encoding []byte `protobuf:"bytes,3,opt,name=asn1Encoding,proto3" json:"asn1Encoding,omitempty"` } -func (m *SigmaOutput) Reset() { *m = SigmaOutput{} } -func (m *SigmaOutput) String() string { return proto.CompactTextString(m) } -func (*SigmaOutput) ProtoMessage() {} +func (m *SigmaOutput) Reset() { *m = SigmaOutput{} } +func (m *SigmaOutput) String() string { return proto.CompactTextString(m) } +func (*SigmaOutput) ProtoMessage() {} +func (*SigmaOutput) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{3} } type BuildResult struct { Status BuildResult_StatusCode `protobuf:"varint,1,opt,name=status,enum=protos.BuildResult_StatusCode" json:"status,omitempty"` @@ -101,9 +106,10 @@ type BuildResult struct { DeploymentSpec *ChaincodeDeploymentSpec `protobuf:"bytes,3,opt,name=deploymentSpec" json:"deploymentSpec,omitempty"` } -func (m *BuildResult) Reset() { *m = BuildResult{} } -func (m *BuildResult) String() string { return proto.CompactTextString(m) } -func (*BuildResult) ProtoMessage() {} +func (m *BuildResult) Reset() { *m = BuildResult{} } +func (m *BuildResult) String() string { return proto.CompactTextString(m) } +func (*BuildResult) ProtoMessage() {} +func (*BuildResult) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{4} } func (m *BuildResult) GetDeploymentSpec() *ChaincodeDeploymentSpec { if m != nil { @@ -116,11 +122,18 @@ type TransactionRequest struct { TransactionUuid string `protobuf:"bytes,1,opt,name=transactionUuid" 
json:"transactionUuid,omitempty"` } -func (m *TransactionRequest) Reset() { *m = TransactionRequest{} } -func (m *TransactionRequest) String() string { return proto.CompactTextString(m) } -func (*TransactionRequest) ProtoMessage() {} +func (m *TransactionRequest) Reset() { *m = TransactionRequest{} } +func (m *TransactionRequest) String() string { return proto.CompactTextString(m) } +func (*TransactionRequest) ProtoMessage() {} +func (*TransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{5} } func init() { + proto.RegisterType((*Secret)(nil), "protos.Secret") + proto.RegisterType((*SigmaInput)(nil), "protos.SigmaInput") + proto.RegisterType((*ExecuteWithBinding)(nil), "protos.ExecuteWithBinding") + proto.RegisterType((*SigmaOutput)(nil), "protos.SigmaOutput") + proto.RegisterType((*BuildResult)(nil), "protos.BuildResult") + proto.RegisterType((*TransactionRequest)(nil), "protos.TransactionRequest") proto.RegisterEnum("protos.BuildResult_StatusCode", BuildResult_StatusCode_name, BuildResult_StatusCode_value) } @@ -128,6 +141,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + // Client API for Devops service type DevopsClient interface { @@ -269,112 +286,166 @@ func RegisterDevopsServer(s *grpc.Server, srv DevopsServer) { s.RegisterService(&_Devops_serviceDesc, srv) } -func _Devops_Login_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Devops_Login_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Secret) if err := dec(in); err != nil { return nil, err } - out, err := srv.(DevopsServer).Login(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(DevopsServer).Login(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Devops/Login", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevopsServer).Login(ctx, req.(*Secret)) + } + return interceptor(ctx, in, info, handler) } -func _Devops_Build_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Devops_Build_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ChaincodeSpec) if err := dec(in); err != nil { return nil, err } - out, err := srv.(DevopsServer).Build(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(DevopsServer).Build(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Devops/Build", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevopsServer).Build(ctx, req.(*ChaincodeSpec)) + } + return interceptor(ctx, in, info, handler) } -func _Devops_Deploy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Devops_Deploy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ChaincodeSpec) if err := dec(in); err != nil { return nil, err } - out, err := srv.(DevopsServer).Deploy(ctx, in) - if err != nil { - return nil, err + if 
interceptor == nil { + return srv.(DevopsServer).Deploy(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Devops/Deploy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevopsServer).Deploy(ctx, req.(*ChaincodeSpec)) + } + return interceptor(ctx, in, info, handler) } -func _Devops_Invoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Devops_Invoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ChaincodeInvocationSpec) if err := dec(in); err != nil { return nil, err } - out, err := srv.(DevopsServer).Invoke(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(DevopsServer).Invoke(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Devops/Invoke", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevopsServer).Invoke(ctx, req.(*ChaincodeInvocationSpec)) + } + return interceptor(ctx, in, info, handler) } -func _Devops_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Devops_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ChaincodeInvocationSpec) if err := dec(in); err != nil { return nil, err } - out, err := srv.(DevopsServer).Query(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(DevopsServer).Query(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Devops/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevopsServer).Query(ctx, req.(*ChaincodeInvocationSpec)) + } + return interceptor(ctx, in, info, handler) } -func _Devops_EXP_GetApplicationTCert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Devops_EXP_GetApplicationTCert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Secret) if err := dec(in); err != nil { return nil, err } - out, err := srv.(DevopsServer).EXP_GetApplicationTCert(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(DevopsServer).EXP_GetApplicationTCert(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Devops/EXP_GetApplicationTCert", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevopsServer).EXP_GetApplicationTCert(ctx, req.(*Secret)) + } + return interceptor(ctx, in, info, handler) } -func _Devops_EXP_PrepareForTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Devops_EXP_PrepareForTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Secret) if err := dec(in); err != nil { return nil, err } - out, err := srv.(DevopsServer).EXP_PrepareForTx(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(DevopsServer).EXP_PrepareForTx(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/protos.Devops/EXP_PrepareForTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevopsServer).EXP_PrepareForTx(ctx, req.(*Secret)) + } + return interceptor(ctx, in, info, handler) } -func _Devops_EXP_ProduceSigma_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Devops_EXP_ProduceSigma_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SigmaInput) if err := dec(in); err != nil { return nil, err } - out, err := srv.(DevopsServer).EXP_ProduceSigma(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(DevopsServer).EXP_ProduceSigma(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Devops/EXP_ProduceSigma", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevopsServer).EXP_ProduceSigma(ctx, req.(*SigmaInput)) + } + return interceptor(ctx, in, info, handler) } -func _Devops_EXP_ExecuteWithBinding_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Devops_EXP_ExecuteWithBinding_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ExecuteWithBinding) if err := dec(in); err != nil { return nil, err } - out, err := srv.(DevopsServer).EXP_ExecuteWithBinding(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(DevopsServer).EXP_ExecuteWithBinding(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Devops/EXP_ExecuteWithBinding", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevopsServer).EXP_ExecuteWithBinding(ctx, req.(*ExecuteWithBinding)) + } + return interceptor(ctx, in, info, handler) } var _Devops_serviceDesc = grpc.ServiceDesc{ @@ -418,5 +489,48 @@ var _Devops_serviceDesc = grpc.ServiceDesc{ Handler: _Devops_EXP_ExecuteWithBinding_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor3, +} + +func init() { proto.RegisterFile("devops.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 572 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xae, 0xdb, 0x26, 0xa5, 0x93, 0xd0, 0x5a, 0x23, 0xa0, 0x91, 0x0f, 0x80, 0x7c, 0x40, 0x95, + 0x90, 0x2a, 0x51, 0x44, 0x0f, 0x88, 0x22, 0xe5, 0xc7, 0x69, 0x23, 0x55, 0xa5, 0xd8, 0x89, 0x80, + 0x03, 0x42, 0x1b, 0x7b, 0x49, 0x2d, 0x9c, 0x5d, 0xe3, 0x5d, 0x57, 0xed, 0x23, 0xf0, 0x42, 0xbc, + 0x0a, 0xaf, 0x43, 0x76, 0xd7, 0x36, 0x24, 0x4d, 0xd5, 0x0a, 0x4e, 0xd9, 0x99, 0xf9, 0xbe, 0xf9, + 0xf2, 0x8d, 0x67, 0x17, 0x9a, 0x11, 0xbd, 0xe0, 0xa9, 0xd8, 0x4b, 0x33, 0x2e, 0x39, 0xd6, 0xf5, + 0x8f, 0x70, 0xb6, 0xc3, 0x73, 0x12, 0xb3, 0x90, 0x47, 0xd4, 0x14, 0x9c, 0xe6, 0x57, 0x32, 0xce, + 0xe2, 0xd0, 0x44, 0xee, 0x31, 0xd4, 0x03, 0x1a, 0x66, 0x54, 0xa2, 0x03, 0xf7, 0x28, 0xcb, 0x78, + 0x92, 0x0c, 0xa2, 0x96, 0xf5, 0xd4, 0xda, 0xdd, 0xf4, 0xab, 0x18, 0x5d, 0x68, 0x9a, 0xb3, 0xc1, + 0xb6, 0x56, 0x75, 0x7d, 0x2e, 0xe7, 0x46, 0x00, 0x41, 0x3c, 0x99, 0x92, 0x01, 0x4b, 0x73, 0x89, + 0xcf, 0xa0, 0x2e, 0x0c, 0x56, 0xf5, 0x6a, 0xec, 0x6f, 0x19, 0x3d, 0xb1, 0x67, 0xd0, 0x7e, 0x51, + 0x55, 0xaa, 0x24, 0x4d, 0x87, 0x5d, 0x9a, 
0x99, 0xae, 0x4d, 0xbf, 0x8a, 0x11, 0x61, 0x3d, 0x22, + 0x92, 0xb4, 0xd6, 0x74, 0x5e, 0x9f, 0xdd, 0x1f, 0x16, 0xa0, 0x77, 0x49, 0xc3, 0x5c, 0xd2, 0x0f, + 0xb1, 0x3c, 0xef, 0xc4, 0x2c, 0x8a, 0xd9, 0x04, 0x3f, 0xc1, 0x4e, 0xe5, 0x73, 0xc0, 0x2e, 0x78, + 0x48, 0x64, 0xcc, 0x59, 0x90, 0xd2, 0xb0, 0xd0, 0x7f, 0x52, 0xea, 0x77, 0x97, 0xc3, 0xfc, 0x9b, + 0xf8, 0xd8, 0x82, 0x8d, 0xb1, 0x51, 0x29, 0xfe, 0x60, 0x19, 0xba, 0x9f, 0xa1, 0xa1, 0x1d, 0xbf, + 0xcb, 0xa5, 0xb2, 0xfc, 0x00, 0x6a, 0x32, 0x54, 0x3e, 0x2c, 0x0d, 0x33, 0x81, 0xca, 0x0a, 0x05, + 0x2a, 0xc8, 0x26, 0x50, 0x03, 0x25, 0x82, 0xbd, 0xf0, 0x94, 0xa0, 0xea, 0x6c, 0x2c, 0xce, 0xe5, + 0xdc, 0x5f, 0x16, 0x34, 0x3a, 0x79, 0x9c, 0x44, 0x3e, 0x15, 0x79, 0x22, 0xf1, 0x60, 0x36, 0x52, + 0x49, 0x64, 0x2e, 0xb4, 0xc0, 0xd6, 0xfe, 0xe3, 0xd2, 0xd2, 0x5f, 0xa0, 0xbd, 0x40, 0x23, 0xba, + 0x33, 0x1b, 0x7e, 0x81, 0x46, 0x1b, 0xd6, 0xa6, 0x62, 0x52, 0x7c, 0x33, 0x75, 0xc4, 0x23, 0xd8, + 0x8a, 0x68, 0x9a, 0xf0, 0xab, 0x29, 0x65, 0x52, 0x0f, 0x69, 0xed, 0x86, 0x21, 0xf5, 0xe6, 0x60, + 0xfe, 0x02, 0xcd, 0x7d, 0x35, 0xfb, 0xe6, 0x95, 0x20, 0xde, 0x87, 0xcd, 0xd1, 0x69, 0xcf, 0xeb, + 0x0f, 0x4e, 0xbd, 0x9e, 0xbd, 0x82, 0x0d, 0xd8, 0x08, 0x46, 0xdd, 0xae, 0x17, 0x04, 0xb6, 0xa5, + 0x82, 0x7e, 0x7b, 0x70, 0x32, 0xf2, 0x3d, 0x7b, 0xd5, 0x7d, 0x0b, 0x38, 0xcc, 0x08, 0x13, 0x24, + 0x54, 0x53, 0xf6, 0xe9, 0xf7, 0x9c, 0x0a, 0x89, 0xbb, 0xb0, 0x2d, 0xff, 0x64, 0x47, 0x79, 0x5c, + 0xee, 0xe1, 0x62, 0x7a, 0xff, 0xe7, 0x3a, 0xd4, 0x7b, 0x7a, 0xd9, 0xf1, 0x39, 0xd4, 0x4e, 0xf8, + 0x24, 0x66, 0xb8, 0xb0, 0x60, 0x8e, 0x5d, 0xc6, 0xb3, 0xc1, 0xa4, 0x9c, 0x09, 0xea, 0xae, 0x60, + 0x1b, 0x6a, 0x7a, 0x56, 0xf8, 0xf0, 0x9a, 0x51, 0x65, 0xc7, 0xb9, 0xcd, 0xff, 0xac, 0x45, 0x47, + 0x29, 0xab, 0xdc, 0x7f, 0xf4, 0x38, 0x84, 0xba, 0xda, 0xb1, 0x6f, 0x14, 0x6f, 0xdb, 0xca, 0xa5, + 0x2e, 0xde, 0x40, 0xed, 0x7d, 0x4e, 0xb3, 0xab, 0x7f, 0x63, 0x1f, 0xc2, 0x8e, 0xf7, 0xf1, 0xec, + 0xcb, 0x11, 0x95, 0xed, 0x34, 0x4d, 0x62, 0x83, 0x36, 0xf7, 0xed, 0x2e, 0x23, 0x3c, 0x00, 0x5b, + 0xd1, 0xcf, 0x32, 0x9a, 0x92, 0x8c, 0xf6, 0x79, 0x36, 0xbc, 0xbc, 0x13, 0xef, 0x75, 0xc9, 0xe3, + 0x51, 0x1e, 0x52, 0x7d, 0x6d, 0x10, 0x2b, 0x5e, 0xf5, 0x6e, 0x2c, 0xe5, 0x1e, 0xc3, 0x23, 0xc5, + 0x5d, 0x72, 0xed, 0x9d, 0x12, 0x7d, 0xbd, 0xb6, 0xac, 0xd3, 0xd8, 0x3c, 0x8a, 0x2f, 0x7f, 0x07, + 0x00, 0x00, 0xff, 0xff, 0xb8, 0xae, 0x0a, 0x87, 0x2b, 0x05, 0x00, 0x00, } diff --git a/protos/devops.proto b/protos/devops.proto index 4c578609bb3..9f8cf4b067c 100644 --- a/protos/devops.proto +++ b/protos/devops.proto @@ -50,10 +50,8 @@ service Devops { // Execute a transaction with a specific binding rpc EXP_ExecuteWithBinding(ExecuteWithBinding) returns (Response) {} - } - // Secret is a temporary object to establish security with the Devops. 
// A better solution using certificate will be introduced later message Secret { @@ -69,7 +67,7 @@ message SigmaInput { message ExecuteWithBinding { ChaincodeInvocationSpec chaincodeInvocationSpec = 1; - bytes binding = 2; + bytes binding = 2; } message SigmaOutput { @@ -78,7 +76,6 @@ message SigmaOutput { bytes asn1Encoding = 3; } - message BuildResult { enum StatusCode { diff --git a/protos/events.pb.go b/protos/events.pb.go index 58c1c56038f..a0ff1dac9f4 100644 --- a/protos/events.pb.go +++ b/protos/events.pb.go @@ -43,6 +43,7 @@ var EventType_value = map[string]int32{ func (x EventType) String() string { return proto.EnumName(EventType_name, int32(x)) } +func (EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } // ChaincodeReg is used for registering chaincode Interests // when EventType is CHAINCODE @@ -51,9 +52,10 @@ type ChaincodeReg struct { EventName string `protobuf:"bytes,2,opt,name=eventName" json:"eventName,omitempty"` } -func (m *ChaincodeReg) Reset() { *m = ChaincodeReg{} } -func (m *ChaincodeReg) String() string { return proto.CompactTextString(m) } -func (*ChaincodeReg) ProtoMessage() {} +func (m *ChaincodeReg) Reset() { *m = ChaincodeReg{} } +func (m *ChaincodeReg) String() string { return proto.CompactTextString(m) } +func (*ChaincodeReg) ProtoMessage() {} +func (*ChaincodeReg) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } type Interest struct { EventType EventType `protobuf:"varint,1,opt,name=eventType,enum=protos.EventType" json:"eventType,omitempty"` @@ -67,9 +69,10 @@ type Interest struct { RegInfo isInterest_RegInfo `protobuf_oneof:"RegInfo"` } -func (m *Interest) Reset() { *m = Interest{} } -func (m *Interest) String() string { return proto.CompactTextString(m) } -func (*Interest) ProtoMessage() {} +func (m *Interest) Reset() { *m = Interest{} } +func (m *Interest) String() string { return proto.CompactTextString(m) } +func (*Interest) ProtoMessage() {} +func (*Interest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } type isInterest_RegInfo interface { isInterest_RegInfo() @@ -96,8 +99,8 @@ func (m *Interest) GetChaincodeRegInfo() *ChaincodeReg { } // XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Interest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _Interest_OneofMarshaler, _Interest_OneofUnmarshaler, []interface{}{ +func (*Interest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Interest_OneofMarshaler, _Interest_OneofUnmarshaler, _Interest_OneofSizer, []interface{}{ (*Interest_ChaincodeRegInfo)(nil), } } @@ -134,6 +137,22 @@ func _Interest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffe } } +func _Interest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Interest) + // RegInfo + switch x := m.RegInfo.(type) { + case *Interest_ChaincodeRegInfo: + s := proto.Size(x.ChaincodeRegInfo) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + // ---------- consumer events --------- // Register is sent by consumers for registering events // string type - "register" @@ -141,9 +160,10 @@ type Register struct { Events []*Interest `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` } -func (m *Register) Reset() { *m = Register{} } -func (m *Register) String() string { return proto.CompactTextString(m) } -func (*Register) ProtoMessage() {} +func (m *Register) Reset() { *m = Register{} } +func (m *Register) String() string { return proto.CompactTextString(m) } +func (*Register) ProtoMessage() {} +func (*Register) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } func (m *Register) GetEvents() []*Interest { if m != nil { @@ -159,9 +179,10 @@ type Rejection struct { ErrorMsg string `protobuf:"bytes,2,opt,name=errorMsg" json:"errorMsg,omitempty"` } -func (m *Rejection) Reset() { *m = Rejection{} } -func (m *Rejection) String() string { return proto.CompactTextString(m) } -func (*Rejection) ProtoMessage() {} +func (m *Rejection) Reset() { *m = Rejection{} } +func (m *Rejection) String() string { return proto.CompactTextString(m) } +func (*Rejection) ProtoMessage() {} +func (*Rejection) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } func (m *Rejection) GetTx() *Transaction { if m != nil { @@ -175,9 +196,10 @@ type Unregister struct { Events []*Interest `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` } -func (m *Unregister) Reset() { *m = Unregister{} } -func (m *Unregister) String() string { return proto.CompactTextString(m) } -func (*Unregister) ProtoMessage() {} +func (m *Unregister) Reset() { *m = Unregister{} } +func (m *Unregister) String() string { return proto.CompactTextString(m) } +func (*Unregister) ProtoMessage() {} +func (*Unregister) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } func (m *Unregister) GetEvents() []*Interest { if m != nil { @@ -199,9 +221,10 @@ type Event struct { Event isEvent_Event `protobuf_oneof:"Event"` } -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} } type isEvent_Event interface { isEvent_Event() @@ -272,8 +295,8 @@ func (m *Event) 
GetUnregister() *Unregister { } // XXX_OneofFuncs is for the internal use of the proto package. -func (*Event) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _Event_OneofMarshaler, _Event_OneofUnmarshaler, []interface{}{ +func (*Event) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Event_OneofMarshaler, _Event_OneofUnmarshaler, _Event_OneofSizer, []interface{}{ (*Event_Register)(nil), (*Event_Block)(nil), (*Event_ChaincodeEvent)(nil), @@ -366,7 +389,49 @@ func _Event_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) } } +func _Event_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Event) + // Event + switch x := m.Event.(type) { + case *Event_Register: + s := proto.Size(x.Register) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Event_Block: + s := proto.Size(x.Block) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Event_ChaincodeEvent: + s := proto.Size(x.ChaincodeEvent) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Event_Rejection: + s := proto.Size(x.Rejection) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Event_Unregister: + s := proto.Size(x.Unregister) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + func init() { + proto.RegisterType((*ChaincodeReg)(nil), "protos.ChaincodeReg") + proto.RegisterType((*Interest)(nil), "protos.Interest") + proto.RegisterType((*Register)(nil), "protos.Register") + proto.RegisterType((*Rejection)(nil), "protos.Rejection") + proto.RegisterType((*Unregister)(nil), "protos.Unregister") + proto.RegisterType((*Event)(nil), "protos.Event") proto.RegisterEnum("protos.EventType", EventType_name, EventType_value) } @@ -374,6 +439,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion3 + // Client API for Events service type EventsClient interface { @@ -469,4 +538,39 @@ var _Events_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, + Metadata: fileDescriptor4, +} + +func init() { proto.RegisterFile("events.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 445 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x53, 0x5d, 0x6f, 0xd3, 0x30, + 0x14, 0x6d, 0xda, 0xb5, 0xab, 0x6f, 0xd3, 0x29, 0xbb, 0x4c, 0x28, 0x8a, 0x78, 0x98, 0x82, 0x90, + 0xaa, 0x3d, 0x14, 0x08, 0x15, 0xcf, 0x90, 0x2c, 0x62, 0x81, 0x91, 0x4a, 0x26, 0xfc, 0x80, 0x34, + 0x78, 0x23, 0x7c, 0x24, 0x93, 0x1d, 0x10, 0xfc, 0x05, 0x1e, 0xf9, 0xc5, 0x78, 0x8e, 0x9d, 0x64, + 0xf4, 0x69, 0x4f, 0xf5, 0xbd, 0xe7, 0x9c, 0x7b, 0x8f, 0x4f, 0x1d, 0xb0, 0xd9, 0x4f, 0x56, 0x35, + 0x62, 0x7d, 0xc3, 0xeb, 0xa6, 0xc6, 0x99, 0xfa, 0x11, 0xde, 0x49, 0xf1, 0x39, 0x2f, 0xab, 0xa2, + 0xfe, 0xc4, 0x14, 0xdc, 0xa2, 0x9e, 0x7d, 0x95, 0xef, 0x78, 0x59, 0xb4, 0x95, 0x9f, 0x82, 0x1d, + 0x19, 0x16, 0x65, 0xd7, 0x78, 0x0a, 0x8b, 0x4e, 0x95, 0x9c, 0xbb, 0xd6, 0xa9, 0xb5, 0x22, 0x74, + 0xd8, 0xc2, 0x47, 0x40, 0xd4, 0xb8, 0x34, 0xff, 0xce, 0xdc, 0xb1, 0xc2, 0xfb, 0x86, 0xff, 0xc7, + 0x82, 0x79, 0x52, 0x35, 0x8c, 0x33, 0xd1, 0xe0, 0x53, 0x4d, 0xcd, 0x7e, 0xdf, 0x30, 0x35, 0xea, + 0x28, 0x38, 0x6e, 0xf7, 0x8a, 0x75, 0x6c, 0x00, 0xda, 0x73, 0x30, 0x04, 0xa7, 0x18, 0xb8, 0x49, + 0xaa, 0xab, 0x5a, 0xad, 0x58, 0x04, 0x27, 0x46, 0x37, 0x74, 0x7b, 0x31, 0xa2, 0x7b, 0xfc, 0x90, + 0xc0, 0xa1, 0x3e, 0xfa, 0x1b, 0x98, 0xcb, 0x63, 0x29, 0xa4, 0x1d, 0x5c, 0xc1, 0xac, 0x0d, 0x49, + 0x1a, 0x99, 0xc8, 0x81, 0x8e, 0x19, 0x68, 0xdc, 0x52, 0x8d, 0xfb, 0x97, 0x40, 0x28, 0xfb, 0xc2, + 0x8a, 0xa6, 0xac, 0x2b, 0x7c, 0x0c, 0xe3, 0xe6, 0x97, 0xf2, 0xbe, 0x08, 0x1e, 0x18, 0x49, 0xc6, + 0xf3, 0x4a, 0xe4, 0x8a, 0x40, 0x25, 0x8c, 0x1e, 0xcc, 0x19, 0xe7, 0x35, 0x7f, 0x2f, 0xae, 0x75, + 0x22, 0x5d, 0xed, 0xbf, 0x04, 0xf8, 0x58, 0xf1, 0xfb, 0xbb, 0xf8, 0x3b, 0x86, 0xa9, 0xca, 0x08, + 0xd7, 0x30, 0x37, 0x7a, 0x6d, 0xa4, 0x53, 0x99, 0xdb, 0xc9, 0x20, 0x3a, 0x0e, 0x3e, 0x81, 0xe9, + 0xee, 0x5b, 0x5d, 0x7c, 0xd5, 0xc9, 0x2d, 0x0d, 0x39, 0xbc, 0x6d, 0x4a, 0x66, 0x8b, 0xe2, 0x2b, + 0x38, 0xea, 0xb2, 0x53, 0x8b, 0xdc, 0x89, 0xe2, 0x3f, 0xdc, 0x4b, 0x5a, 0xa1, 0x52, 0xf8, 0x1f, + 0x1f, 0x9f, 0x03, 0xe1, 0x26, 0x28, 0xf7, 0x40, 0x89, 0x8f, 0x7b, 0x67, 0x1a, 0x90, 0xba, 0x9e, + 0x85, 0x1b, 0x80, 0x1f, 0x5d, 0x1a, 0xee, 0x54, 0x69, 0xd0, 0x68, 0xfa, 0x9c, 0xa4, 0x68, 0xc0, + 0x0b, 0x0f, 0x75, 0x14, 0x67, 0x21, 0x90, 0xee, 0xdd, 0xa0, 0x2d, 0xff, 0xdd, 0xf8, 0x4d, 0xf2, + 0x21, 0x8b, 0xa9, 0x33, 0x42, 0x02, 0xd3, 0xf0, 0x72, 0x1b, 0xbd, 0x73, 0x2c, 0x5c, 0x02, 0x89, + 0x2e, 0x5e, 0x27, 0x69, 0xb4, 0x3d, 0x8f, 0x9d, 0xf1, 0x6d, 0x49, 0xe3, 0xb7, 0x71, 0x94, 0x25, + 0xdb, 0xd4, 0x99, 0x04, 0x1b, 0x98, 0xa9, 0x19, 0x02, 0xcf, 0xe0, 0x40, 0xde, 0xb1, 0xc1, 0xe5, + 0x9d, 0x37, 0xe9, 0xdd, 0x2d, 0xfd, 0xd1, 0xca, 0x7a, 0x66, 0xed, 0xda, 0x6f, 0xea, 0xc5, 0xbf, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x15, 0xb7, 0x5f, 0x65, 0x6a, 0x03, 0x00, 0x00, } diff --git a/protos/fabric.pb.go b/protos/fabric.pb.go index 567e36a9b90..aec5cb053a1 100644 --- a/protos/fabric.pb.go +++ b/protos/fabric.pb.go @@ -7,7 +7,7 @@ package protos import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "google/protobuf" +import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" import ( context "golang.org/x/net/context" @@ -51,6 +51,7 @@ var 
Transaction_Type_value = map[string]int32{ func (x Transaction_Type) String() string { return proto.EnumName(Transaction_Type_name, int32(x)) } +func (Transaction_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor6, []int{0, 0} } type PeerEndpoint_Type int32 @@ -74,6 +75,7 @@ var PeerEndpoint_Type_value = map[string]int32{ func (x PeerEndpoint_Type) String() string { return proto.EnumName(PeerEndpoint_Type_name, int32(x)) } +func (PeerEndpoint_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor6, []int{8, 0} } type Message_Type int32 @@ -136,6 +138,7 @@ var Message_Type_value = map[string]int32{ func (x Message_Type) String() string { return proto.EnumName(Message_Type_name, int32(x)) } +func (Message_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor6, []int{12, 0} } type Response_StatusCode int32 @@ -159,6 +162,7 @@ var Response_StatusCode_value = map[string]int32{ func (x Response_StatusCode) String() string { return proto.EnumName(Response_StatusCode_name, int32(x)) } +func (Response_StatusCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor6, []int{13, 0} } // Transaction defines a function call to a contract. // `args` is an array of type string so that the chaincode writer can choose @@ -181,9 +185,10 @@ type Transaction struct { Signature []byte `protobuf:"bytes,12,opt,name=signature,proto3" json:"signature,omitempty"` } -func (m *Transaction) Reset() { *m = Transaction{} } -func (m *Transaction) String() string { return proto.CompactTextString(m) } -func (*Transaction) ProtoMessage() {} +func (m *Transaction) Reset() { *m = Transaction{} } +func (m *Transaction) String() string { return proto.CompactTextString(m) } +func (*Transaction) ProtoMessage() {} +func (*Transaction) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } func (m *Transaction) GetTimestamp() *google_protobuf.Timestamp { if m != nil { @@ -197,9 +202,10 @@ type TransactionBlock struct { Transactions []*Transaction `protobuf:"bytes,1,rep,name=transactions" json:"transactions,omitempty"` } -func (m *TransactionBlock) Reset() { *m = TransactionBlock{} } -func (m *TransactionBlock) String() string { return proto.CompactTextString(m) } -func (*TransactionBlock) ProtoMessage() {} +func (m *TransactionBlock) Reset() { *m = TransactionBlock{} } +func (m *TransactionBlock) String() string { return proto.CompactTextString(m) } +func (*TransactionBlock) ProtoMessage() {} +func (*TransactionBlock) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } func (m *TransactionBlock) GetTransactions() []*Transaction { if m != nil { @@ -223,9 +229,10 @@ type TransactionResult struct { ChaincodeEvent *ChaincodeEvent `protobuf:"bytes,5,opt,name=chaincodeEvent" json:"chaincodeEvent,omitempty"` } -func (m *TransactionResult) Reset() { *m = TransactionResult{} } -func (m *TransactionResult) String() string { return proto.CompactTextString(m) } -func (*TransactionResult) ProtoMessage() {} +func (m *TransactionResult) Reset() { *m = TransactionResult{} } +func (m *TransactionResult) String() string { return proto.CompactTextString(m) } +func (*TransactionResult) ProtoMessage() {} +func (*TransactionResult) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{2} } func (m *TransactionResult) GetChaincodeEvent() *ChaincodeEvent { if m != nil { @@ -256,9 +263,10 @@ type Block struct { NonHashData *NonHashData `protobuf:"bytes,7,opt,name=nonHashData" json:"nonHashData,omitempty"` } -func (m *Block) Reset() { *m = Block{} } -func (m *Block) String() string { return 
proto.CompactTextString(m) } -func (*Block) ProtoMessage() {} +func (m *Block) Reset() { *m = Block{} } +func (m *Block) String() string { return proto.CompactTextString(m) } +func (*Block) ProtoMessage() {} +func (*Block) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{3} } func (m *Block) GetTimestamp() *google_protobuf.Timestamp { if m != nil { @@ -289,9 +297,10 @@ type BlockchainInfo struct { PreviousBlockHash []byte `protobuf:"bytes,3,opt,name=previousBlockHash,proto3" json:"previousBlockHash,omitempty"` } -func (m *BlockchainInfo) Reset() { *m = BlockchainInfo{} } -func (m *BlockchainInfo) String() string { return proto.CompactTextString(m) } -func (*BlockchainInfo) ProtoMessage() {} +func (m *BlockchainInfo) Reset() { *m = BlockchainInfo{} } +func (m *BlockchainInfo) String() string { return proto.CompactTextString(m) } +func (*BlockchainInfo) ProtoMessage() {} +func (*BlockchainInfo) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{4} } // NonHashData is data that is recorded on the block, but not included in // the block hash when verifying the blockchain. @@ -304,9 +313,10 @@ type NonHashData struct { ChaincodeEvents []*ChaincodeEvent `protobuf:"bytes,2,rep,name=chaincodeEvents" json:"chaincodeEvents,omitempty"` } -func (m *NonHashData) Reset() { *m = NonHashData{} } -func (m *NonHashData) String() string { return proto.CompactTextString(m) } -func (*NonHashData) ProtoMessage() {} +func (m *NonHashData) Reset() { *m = NonHashData{} } +func (m *NonHashData) String() string { return proto.CompactTextString(m) } +func (*NonHashData) ProtoMessage() {} +func (*NonHashData) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{5} } func (m *NonHashData) GetLocalLedgerCommitTimestamp() *google_protobuf.Timestamp { if m != nil { @@ -327,28 +337,31 @@ type PeerAddress struct { Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` } -func (m *PeerAddress) Reset() { *m = PeerAddress{} } -func (m *PeerAddress) String() string { return proto.CompactTextString(m) } -func (*PeerAddress) ProtoMessage() {} +func (m *PeerAddress) Reset() { *m = PeerAddress{} } +func (m *PeerAddress) String() string { return proto.CompactTextString(m) } +func (*PeerAddress) ProtoMessage() {} +func (*PeerAddress) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{6} } type PeerID struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } -func (m *PeerID) Reset() { *m = PeerID{} } -func (m *PeerID) String() string { return proto.CompactTextString(m) } -func (*PeerID) ProtoMessage() {} +func (m *PeerID) Reset() { *m = PeerID{} } +func (m *PeerID) String() string { return proto.CompactTextString(m) } +func (*PeerID) ProtoMessage() {} +func (*PeerID) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{7} } type PeerEndpoint struct { - ID *PeerID `protobuf:"bytes,1,opt,name=ID" json:"ID,omitempty"` + ID *PeerID `protobuf:"bytes,1,opt,name=ID,json=iD" json:"ID,omitempty"` Address string `protobuf:"bytes,2,opt,name=address" json:"address,omitempty"` Type PeerEndpoint_Type `protobuf:"varint,3,opt,name=type,enum=protos.PeerEndpoint_Type" json:"type,omitempty"` PkiID []byte `protobuf:"bytes,4,opt,name=pkiID,proto3" json:"pkiID,omitempty"` } -func (m *PeerEndpoint) Reset() { *m = PeerEndpoint{} } -func (m *PeerEndpoint) String() string { return proto.CompactTextString(m) } -func (*PeerEndpoint) ProtoMessage() {} +func (m *PeerEndpoint) Reset() { *m = PeerEndpoint{} } +func (m *PeerEndpoint) String() string { return 
proto.CompactTextString(m) } +func (*PeerEndpoint) ProtoMessage() {} +func (*PeerEndpoint) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{8} } func (m *PeerEndpoint) GetID() *PeerID { if m != nil { @@ -361,9 +374,10 @@ type PeersMessage struct { Peers []*PeerEndpoint `protobuf:"bytes,1,rep,name=peers" json:"peers,omitempty"` } -func (m *PeersMessage) Reset() { *m = PeersMessage{} } -func (m *PeersMessage) String() string { return proto.CompactTextString(m) } -func (*PeersMessage) ProtoMessage() {} +func (m *PeersMessage) Reset() { *m = PeersMessage{} } +func (m *PeersMessage) String() string { return proto.CompactTextString(m) } +func (*PeersMessage) ProtoMessage() {} +func (*PeersMessage) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{9} } func (m *PeersMessage) GetPeers() []*PeerEndpoint { if m != nil { @@ -376,18 +390,20 @@ type PeersAddresses struct { Addresses []string `protobuf:"bytes,1,rep,name=addresses" json:"addresses,omitempty"` } -func (m *PeersAddresses) Reset() { *m = PeersAddresses{} } -func (m *PeersAddresses) String() string { return proto.CompactTextString(m) } -func (*PeersAddresses) ProtoMessage() {} +func (m *PeersAddresses) Reset() { *m = PeersAddresses{} } +func (m *PeersAddresses) String() string { return proto.CompactTextString(m) } +func (*PeersAddresses) ProtoMessage() {} +func (*PeersAddresses) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{10} } type HelloMessage struct { PeerEndpoint *PeerEndpoint `protobuf:"bytes,1,opt,name=peerEndpoint" json:"peerEndpoint,omitempty"` BlockchainInfo *BlockchainInfo `protobuf:"bytes,2,opt,name=blockchainInfo" json:"blockchainInfo,omitempty"` } -func (m *HelloMessage) Reset() { *m = HelloMessage{} } -func (m *HelloMessage) String() string { return proto.CompactTextString(m) } -func (*HelloMessage) ProtoMessage() {} +func (m *HelloMessage) Reset() { *m = HelloMessage{} } +func (m *HelloMessage) String() string { return proto.CompactTextString(m) } +func (*HelloMessage) ProtoMessage() {} +func (*HelloMessage) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{11} } func (m *HelloMessage) GetPeerEndpoint() *PeerEndpoint { if m != nil { @@ -410,9 +426,10 @@ type Message struct { Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` } -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{12} } func (m *Message) GetTimestamp() *google_protobuf.Timestamp { if m != nil { @@ -426,9 +443,10 @@ type Response struct { Msg []byte `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` } -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{13} } // BlockState is the payload of Message.SYNC_BLOCK_ADDED. 
When a VP // commits a new block to the ledger, it will notify its connected NVPs of the @@ -440,9 +458,10 @@ type BlockState struct { StateDelta []byte `protobuf:"bytes,2,opt,name=stateDelta,proto3" json:"stateDelta,omitempty"` } -func (m *BlockState) Reset() { *m = BlockState{} } -func (m *BlockState) String() string { return proto.CompactTextString(m) } -func (*BlockState) ProtoMessage() {} +func (m *BlockState) Reset() { *m = BlockState{} } +func (m *BlockState) String() string { return proto.CompactTextString(m) } +func (*BlockState) ProtoMessage() {} +func (*BlockState) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{14} } func (m *BlockState) GetBlock() *Block { if m != nil { @@ -462,9 +481,10 @@ type SyncBlockRange struct { End uint64 `protobuf:"varint,3,opt,name=end" json:"end,omitempty"` } -func (m *SyncBlockRange) Reset() { *m = SyncBlockRange{} } -func (m *SyncBlockRange) String() string { return proto.CompactTextString(m) } -func (*SyncBlockRange) ProtoMessage() {} +func (m *SyncBlockRange) Reset() { *m = SyncBlockRange{} } +func (m *SyncBlockRange) String() string { return proto.CompactTextString(m) } +func (*SyncBlockRange) ProtoMessage() {} +func (*SyncBlockRange) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{15} } // SyncBlocks is the payload of Message.SYNC_BLOCKS, where the range // indicates the blocks responded to the request SYNC_GET_BLOCKS @@ -473,9 +493,10 @@ type SyncBlocks struct { Blocks []*Block `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` } -func (m *SyncBlocks) Reset() { *m = SyncBlocks{} } -func (m *SyncBlocks) String() string { return proto.CompactTextString(m) } -func (*SyncBlocks) ProtoMessage() {} +func (m *SyncBlocks) Reset() { *m = SyncBlocks{} } +func (m *SyncBlocks) String() string { return proto.CompactTextString(m) } +func (*SyncBlocks) ProtoMessage() {} +func (*SyncBlocks) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{16} } func (m *SyncBlocks) GetRange() *SyncBlockRange { if m != nil { @@ -496,9 +517,10 @@ type SyncStateSnapshotRequest struct { CorrelationId uint64 `protobuf:"varint,1,opt,name=correlationId" json:"correlationId,omitempty"` } -func (m *SyncStateSnapshotRequest) Reset() { *m = SyncStateSnapshotRequest{} } -func (m *SyncStateSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*SyncStateSnapshotRequest) ProtoMessage() {} +func (m *SyncStateSnapshotRequest) Reset() { *m = SyncStateSnapshotRequest{} } +func (m *SyncStateSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*SyncStateSnapshotRequest) ProtoMessage() {} +func (*SyncStateSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{17} } // SyncStateSnapshot is the payload of Message.SYNC_SNAPSHOT, which is a response // to penchainMessage.SYNC_GET_SNAPSHOT. 
It contains the snapshot or a chunk of the @@ -511,9 +533,10 @@ type SyncStateSnapshot struct { Request *SyncStateSnapshotRequest `protobuf:"bytes,4,opt,name=request" json:"request,omitempty"` } -func (m *SyncStateSnapshot) Reset() { *m = SyncStateSnapshot{} } -func (m *SyncStateSnapshot) String() string { return proto.CompactTextString(m) } -func (*SyncStateSnapshot) ProtoMessage() {} +func (m *SyncStateSnapshot) Reset() { *m = SyncStateSnapshot{} } +func (m *SyncStateSnapshot) String() string { return proto.CompactTextString(m) } +func (*SyncStateSnapshot) ProtoMessage() {} +func (*SyncStateSnapshot) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{18} } func (m *SyncStateSnapshot) GetRequest() *SyncStateSnapshotRequest { if m != nil { @@ -530,9 +553,10 @@ type SyncStateDeltasRequest struct { Range *SyncBlockRange `protobuf:"bytes,1,opt,name=range" json:"range,omitempty"` } -func (m *SyncStateDeltasRequest) Reset() { *m = SyncStateDeltasRequest{} } -func (m *SyncStateDeltasRequest) String() string { return proto.CompactTextString(m) } -func (*SyncStateDeltasRequest) ProtoMessage() {} +func (m *SyncStateDeltasRequest) Reset() { *m = SyncStateDeltasRequest{} } +func (m *SyncStateDeltasRequest) String() string { return proto.CompactTextString(m) } +func (*SyncStateDeltasRequest) ProtoMessage() {} +func (*SyncStateDeltasRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{19} } func (m *SyncStateDeltasRequest) GetRange() *SyncBlockRange { if m != nil { @@ -548,9 +572,10 @@ type SyncStateDeltas struct { Deltas [][]byte `protobuf:"bytes,2,rep,name=deltas,proto3" json:"deltas,omitempty"` } -func (m *SyncStateDeltas) Reset() { *m = SyncStateDeltas{} } -func (m *SyncStateDeltas) String() string { return proto.CompactTextString(m) } -func (*SyncStateDeltas) ProtoMessage() {} +func (m *SyncStateDeltas) Reset() { *m = SyncStateDeltas{} } +func (m *SyncStateDeltas) String() string { return proto.CompactTextString(m) } +func (*SyncStateDeltas) ProtoMessage() {} +func (*SyncStateDeltas) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{20} } func (m *SyncStateDeltas) GetRange() *SyncBlockRange { if m != nil { @@ -560,6 +585,27 @@ func (m *SyncStateDeltas) GetRange() *SyncBlockRange { } func init() { + proto.RegisterType((*Transaction)(nil), "protos.Transaction") + proto.RegisterType((*TransactionBlock)(nil), "protos.TransactionBlock") + proto.RegisterType((*TransactionResult)(nil), "protos.TransactionResult") + proto.RegisterType((*Block)(nil), "protos.Block") + proto.RegisterType((*BlockchainInfo)(nil), "protos.BlockchainInfo") + proto.RegisterType((*NonHashData)(nil), "protos.NonHashData") + proto.RegisterType((*PeerAddress)(nil), "protos.PeerAddress") + proto.RegisterType((*PeerID)(nil), "protos.PeerID") + proto.RegisterType((*PeerEndpoint)(nil), "protos.PeerEndpoint") + proto.RegisterType((*PeersMessage)(nil), "protos.PeersMessage") + proto.RegisterType((*PeersAddresses)(nil), "protos.PeersAddresses") + proto.RegisterType((*HelloMessage)(nil), "protos.HelloMessage") + proto.RegisterType((*Message)(nil), "protos.Message") + proto.RegisterType((*Response)(nil), "protos.Response") + proto.RegisterType((*BlockState)(nil), "protos.BlockState") + proto.RegisterType((*SyncBlockRange)(nil), "protos.SyncBlockRange") + proto.RegisterType((*SyncBlocks)(nil), "protos.SyncBlocks") + proto.RegisterType((*SyncStateSnapshotRequest)(nil), "protos.SyncStateSnapshotRequest") + proto.RegisterType((*SyncStateSnapshot)(nil), "protos.SyncStateSnapshot") + 
proto.RegisterType((*SyncStateDeltasRequest)(nil), "protos.SyncStateDeltasRequest") + proto.RegisterType((*SyncStateDeltas)(nil), "protos.SyncStateDeltas") proto.RegisterEnum("protos.Transaction_Type", Transaction_Type_name, Transaction_Type_value) proto.RegisterEnum("protos.PeerEndpoint_Type", PeerEndpoint_Type_name, PeerEndpoint_Type_value) proto.RegisterEnum("protos.Message_Type", Message_Type_name, Message_Type_value) @@ -570,6 +616,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + // Client API for Peer service type PeerClient interface { @@ -668,16 +718,22 @@ func (x *peerChatServer) Recv() (*Message, error) { return m, nil } -func _Peer_ProcessTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Peer_ProcessTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Transaction) if err := dec(in); err != nil { return nil, err } - out, err := srv.(PeerServer).ProcessTransaction(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(PeerServer).ProcessTransaction(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Peer/ProcessTransaction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeerServer).ProcessTransaction(ctx, req.(*Transaction)) + } + return interceptor(ctx, in, info, handler) } var _Peer_serviceDesc = grpc.ServiceDesc{ @@ -697,4 +753,103 @@ var _Peer_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, + Metadata: fileDescriptor6, +} + +func init() { proto.RegisterFile("fabric.proto", fileDescriptor6) } + +var fileDescriptor6 = []byte{ + // 1463 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x57, 0xcd, 0x6e, 0xdb, 0xc6, + 0x13, 0x8f, 0x3e, 0x1d, 0x8d, 0x64, 0x99, 0xde, 0x38, 0x09, 0xe3, 0x04, 0xf9, 0x07, 0xfc, 0xb7, + 0x40, 0x50, 0xb4, 0x4a, 0xe1, 0xa0, 0x48, 0x10, 0xa0, 0x45, 0x14, 0x89, 0x8e, 0x85, 0x28, 0x94, + 0xb3, 0x94, 0x1d, 0xa4, 0x87, 0x1a, 0xb4, 0xb4, 0xb6, 0x89, 0x48, 0xa4, 0x4a, 0x52, 0x41, 0x7d, + 0xed, 0xa9, 0xe8, 0x6b, 0xf4, 0xd8, 0x63, 0x9f, 0xa1, 0x1f, 0x8f, 0xd0, 0xb7, 0xe8, 0xa5, 0x0f, + 0xd0, 0xd9, 0x59, 0x92, 0x22, 0x65, 0x25, 0x4d, 0x7a, 0xb1, 0x39, 0x5f, 0xbb, 0xf3, 0xf1, 0x9b, + 0x99, 0x15, 0x34, 0x4e, 0x9c, 0xe3, 0xc0, 0x1d, 0xb5, 0x66, 0x81, 0x1f, 0xf9, 0xac, 0x4a, 0xff, + 0xc2, 0xed, 0x8d, 0xd1, 0x99, 0xe3, 0x7a, 0x23, 0x7f, 0x2c, 0x94, 0x60, 0x7b, 0x2b, 0x65, 0x88, + 0x37, 0xc2, 0x8b, 0x62, 0xee, 0xff, 0x4e, 0x7d, 0xff, 0x74, 0x22, 0xee, 0x11, 0x75, 0x3c, 0x3f, + 0xb9, 0x17, 0xb9, 0x53, 0x11, 0x46, 0xce, 0x74, 0xa6, 0x14, 0x8c, 0x3f, 0xcb, 0x50, 0x1f, 0x06, + 0x8e, 0x17, 0x3a, 0xa3, 0xc8, 0xf5, 0x3d, 0xf6, 0x29, 0x94, 0xa3, 0xf3, 0x99, 0xd0, 0x0b, 0x77, + 0x0a, 0x77, 0x9b, 0x3b, 0xba, 0xd2, 0x0a, 0x5b, 0x19, 0x95, 0xd6, 0x10, 0xe5, 0x9c, 0xb4, 0xd8, + 0x1d, 0xa8, 0xa7, 0xd7, 0xf6, 0xba, 0x7a, 0x11, 0x8d, 0x1a, 0x3c, 0xcb, 0x62, 0x3a, 0xac, 0xcd, + 0x9c, 0xf3, 0x89, 0xef, 0x8c, 0xf5, 0x12, 0x49, 0x13, 0x92, 0x6d, 0xc3, 0xe5, 0xa9, 0x88, 0x9c, + 0xb1, 0x13, 0x39, 0x7a, 0x99, 0x44, 0x29, 0xcd, 0x18, 0x7a, 0xf1, 0x9d, 0x3b, 0xd6, 0x2b, 0xc8, + 0xaf, 0x71, 0xfa, 0x66, 0x0f, 0xa1, 0x96, 0x3a, 0xaf, 
0x57, 0x51, 0x50, 0xdf, 0xd9, 0x6e, 0xa9, + 0xf0, 0x5a, 0x49, 0x78, 0xad, 0x61, 0xa2, 0xc1, 0x17, 0xca, 0x6c, 0x1f, 0xb6, 0x46, 0xbe, 0x77, + 0xe2, 0x8e, 0x31, 0x2f, 0xae, 0x33, 0x71, 0xa3, 0xf3, 0x3e, 0xe6, 0x68, 0xa2, 0xaf, 0x51, 0x8c, + 0xb7, 0x92, 0x18, 0x3b, 0x2b, 0x74, 0xf8, 0x4a, 0x4b, 0xb6, 0x0b, 0xb7, 0x97, 0xf8, 0xfb, 0xf2, + 0x8c, 0x91, 0x3f, 0x39, 0x14, 0x41, 0x88, 0x49, 0xd2, 0x2f, 0x93, 0xe7, 0xff, 0xa2, 0xc5, 0xb6, + 0xa0, 0xe2, 0xf9, 0xde, 0x48, 0xe8, 0x35, 0x4a, 0x80, 0x22, 0x98, 0x01, 0x8d, 0xc8, 0x3f, 0x44, + 0x03, 0xcc, 0x85, 0x1f, 0x84, 0x3a, 0x90, 0x30, 0xc7, 0x93, 0x19, 0x1a, 0x89, 0x20, 0xd2, 0xeb, + 0x24, 0xa3, 0x6f, 0x76, 0x0b, 0x6a, 0xa1, 0x7b, 0xea, 0x39, 0xd1, 0x3c, 0x10, 0x7a, 0x83, 0x04, + 0x0b, 0x86, 0xe1, 0x43, 0x59, 0x56, 0x8e, 0xad, 0x43, 0xed, 0xc0, 0xea, 0x9a, 0xbb, 0x3d, 0xcb, + 0xec, 0x6a, 0x97, 0xd0, 0x05, 0xad, 0xb3, 0xd7, 0xee, 0x59, 0x9d, 0x41, 0xd7, 0x3c, 0xea, 0x9a, + 0xfb, 0xfd, 0xc1, 0x2b, 0xad, 0x90, 0xe7, 0xf6, 0xac, 0xc3, 0xc1, 0x33, 0x53, 0x2b, 0xb2, 0x2b, + 0xb0, 0xb1, 0xe0, 0xbe, 0x38, 0x30, 0xf9, 0x2b, 0xad, 0xc4, 0xae, 0xc3, 0x95, 0x05, 0x73, 0x68, + 0xf2, 0xe7, 0x3d, 0xab, 0x3d, 0x34, 0xb5, 0xb2, 0xf1, 0x0c, 0xb4, 0x0c, 0x6c, 0x9e, 0x4c, 0xfc, + 0xd1, 0x6b, 0xf6, 0x00, 0x43, 0x5b, 0xf0, 0x42, 0x84, 0x59, 0x09, 0xeb, 0x78, 0x65, 0x05, 0xcc, + 0x78, 0x4e, 0xd1, 0xf8, 0xa5, 0x00, 0x9b, 0x59, 0xa9, 0x08, 0xe7, 0x93, 0x28, 0xc5, 0x49, 0x21, + 0x83, 0x93, 0x6b, 0x50, 0x0d, 0x48, 0x1a, 0xc3, 0x31, 0xa6, 0x64, 0x76, 0x44, 0x10, 0xf8, 0x41, + 0x07, 0x81, 0x49, 0x58, 0x5c, 0xe7, 0x0b, 0x86, 0xac, 0x04, 0x11, 0x04, 0xc5, 0x1a, 0x57, 0x04, + 0xfb, 0x0a, 0x9a, 0x29, 0x98, 0x4d, 0xd9, 0x56, 0x84, 0xc8, 0xfa, 0xce, 0xb5, 0x14, 0x33, 0x39, + 0x29, 0x5f, 0xd2, 0x36, 0x7e, 0x2d, 0x42, 0x45, 0x05, 0x8e, 0x7d, 0xf0, 0x26, 0x86, 0x46, 0x81, + 0xee, 0x4e, 0xc8, 0x3c, 0xae, 0x8b, 0x1f, 0x82, 0xeb, 0xe5, 0x64, 0x96, 0xde, 0x33, 0x99, 0x04, + 0x94, 0xc8, 0x89, 0xc4, 0x9e, 0x13, 0x9e, 0xc5, 0xbd, 0xb7, 0x60, 0xe0, 0x08, 0xd8, 0x9c, 0x05, + 0xe2, 0x8d, 0xeb, 0xcf, 0x43, 0xf2, 0x9d, 0xb4, 0x2a, 0xa4, 0x75, 0x51, 0x20, 0xb5, 0x11, 0xe4, + 0xa1, 0xf0, 0xc2, 0x79, 0xf8, 0x3c, 0xe9, 0xe7, 0xaa, 0xd2, 0xbe, 0x20, 0x60, 0x5f, 0x40, 0x1d, + 0x31, 0x2e, 0x0d, 0xbb, 0x52, 0x6f, 0x8d, 0xc2, 0x4d, 0x3d, 0xb6, 0x16, 0x22, 0x9e, 0xd5, 0x33, + 0xbe, 0x2f, 0x40, 0x93, 0xae, 0xa4, 0xfc, 0xf6, 0xbc, 0x13, 0x5f, 0x96, 0xf9, 0x4c, 0xb8, 0xa7, + 0x67, 0x11, 0xe5, 0xb3, 0xcc, 0x63, 0x8a, 0x7d, 0x02, 0xda, 0x68, 0x1e, 0x04, 0x98, 0xfd, 0x85, + 0xf3, 0x0a, 0x08, 0x17, 0xf8, 0xab, 0x23, 0x2d, 0xbd, 0x25, 0x52, 0xe3, 0xe7, 0x02, 0xd4, 0x33, + 0x1e, 0xb2, 0xaf, 0x61, 0x1b, 0x65, 0xce, 0xa4, 0x2f, 0xc6, 0xa7, 0x02, 0x51, 0x34, 0x9d, 0xba, + 0x51, 0x5a, 0x27, 0xf2, 0xea, 0xdd, 0x95, 0x7c, 0x87, 0x35, 0x7b, 0x0c, 0x1b, 0x79, 0x28, 0x85, + 0x18, 0x44, 0xe9, 0x1d, 0xc8, 0x5b, 0x56, 0x37, 0x30, 0xd3, 0xfb, 0x42, 0x04, 0xed, 0xf1, 0x18, + 0xf1, 0x4f, 0xf3, 0xe2, 0xcc, 0x0f, 0xa3, 0xa4, 0x53, 0xe4, 0xb7, 0xe4, 0xcd, 0xfc, 0x40, 0xf5, + 0x49, 0x85, 0xd3, 0xb7, 0x71, 0x0b, 0xaa, 0xd2, 0x0c, 0x27, 0x37, 0x4a, 0x3d, 0x67, 0x2a, 0x12, + 0x0b, 0xf9, 0x6d, 0xfc, 0x56, 0x80, 0x86, 0x14, 0x9b, 0xde, 0x78, 0xe6, 0xbb, 0x5e, 0xc4, 0x6e, + 0x43, 0x11, 0xe7, 0xbe, 0x8a, 0xb5, 0x99, 0xb8, 0xa6, 0x0e, 0xe0, 0x45, 0x97, 0xc6, 0xbf, 0xa3, + 0x3c, 0xa0, 0x5b, 0x6a, 0x3c, 0x21, 0xd9, 0x67, 0xf1, 0xa2, 0x29, 0xd1, 0x10, 0xbe, 0x91, 0xb5, + 0x4d, 0x4e, 0xcf, 0x6e, 0x1a, 0xec, 0xcf, 0xd9, 0x6b, 0x17, 0xef, 0x52, 0x70, 0x55, 0x84, 0xf1, + 0x60, 0xf5, 0x4c, 0x43, 0xf2, 0xb0, 0xdd, 0xef, 0x75, 0xdb, 0xc3, 0x01, 0xc7, 
0x61, 0xb6, 0x09, + 0xeb, 0xd6, 0xc0, 0x3a, 0x5a, 0xb0, 0x8a, 0xc6, 0x23, 0x15, 0x07, 0x02, 0x33, 0x0c, 0x9d, 0x53, + 0x81, 0xa8, 0xa9, 0xcc, 0x24, 0x1d, 0x0f, 0xa4, 0xad, 0x55, 0xee, 0x70, 0xa5, 0x62, 0xb4, 0xa0, + 0x49, 0xb6, 0x71, 0x6a, 0x05, 0xf5, 0x93, 0x93, 0x10, 0x74, 0x42, 0x8d, 0x2f, 0x18, 0xc6, 0x0f, + 0x98, 0xb4, 0x3d, 0x31, 0x99, 0xf8, 0xc9, 0x65, 0x0f, 0xa1, 0x31, 0xcb, 0x9c, 0x1b, 0xa7, 0x6f, + 0xf5, 0x9d, 0x39, 0x4d, 0x39, 0x8f, 0x8e, 0x73, 0x6d, 0x10, 0x0f, 0x8c, 0x14, 0x15, 0xf9, 0x26, + 0xe1, 0x4b, 0xda, 0xc6, 0x5f, 0x25, 0x58, 0x4b, 0xbc, 0xb8, 0x9b, 0xdb, 0xf4, 0xe9, 0xed, 0xb1, + 0x38, 0x9b, 0xfb, 0xff, 0x3e, 0xa1, 0xde, 0xbe, 0xfd, 0x73, 0xbb, 0xaa, 0xbc, 0xbc, 0xab, 0x7e, + 0x2f, 0xae, 0x2e, 0x6c, 0x13, 0xa0, 0xdb, 0xb3, 0x3b, 0x47, 0x7b, 0x66, 0xbf, 0x3f, 0xc0, 0xca, + 0xe2, 0x42, 0x22, 0x5a, 0xfe, 0x19, 0x58, 0x96, 0xd9, 0x19, 0xe2, 0x96, 0x62, 0xd0, 0x24, 0xe6, + 0x53, 0x73, 0x78, 0xb4, 0x6f, 0x9a, 0xdc, 0xc6, 0x25, 0x95, 0x18, 0x2a, 0xba, 0xcc, 0x36, 0xa0, + 0x4e, 0xb4, 0x65, 0xbe, 0x7c, 0x6e, 0x3f, 0xd5, 0x2a, 0xec, 0x2a, 0x6c, 0xd2, 0x16, 0x3b, 0x1a, + 0xf2, 0xb6, 0x65, 0xb7, 0x3b, 0xc3, 0xde, 0xc0, 0xd2, 0xaa, 0xf2, 0x02, 0xfb, 0x95, 0xa5, 0xce, + 0x7a, 0xd2, 0x1f, 0x74, 0x9e, 0xd9, 0x5a, 0x5d, 0x1a, 0x13, 0x33, 0x66, 0x34, 0xe4, 0xb6, 0x5c, + 0x30, 0x8e, 0xda, 0xdd, 0x2e, 0x3a, 0xbb, 0xce, 0x6e, 0xc2, 0x75, 0xe2, 0xda, 0x43, 0xdc, 0x87, + 0x74, 0x82, 0x6d, 0xb5, 0xf7, 0xed, 0xbd, 0xc1, 0x50, 0x6b, 0xca, 0xad, 0x99, 0x11, 0xa6, 0x82, + 0x0d, 0x76, 0x03, 0xae, 0x2e, 0x59, 0x75, 0xcd, 0xfe, 0xb0, 0x6d, 0x6b, 0x9a, 0xf4, 0x31, 0x23, + 0x8a, 0xd9, 0x9b, 0xac, 0x01, 0x97, 0xb9, 0x69, 0xef, 0x0f, 0x2c, 0xdb, 0xd4, 0xb6, 0x64, 0xc6, + 0x3a, 0xf2, 0xd3, 0xb2, 0x0f, 0x6c, 0xed, 0xaa, 0xf1, 0x63, 0x01, 0xa5, 0x22, 0x9c, 0xc9, 0x49, + 0xcc, 0xee, 0x43, 0x55, 0x8e, 0xf9, 0x79, 0x18, 0x17, 0xfd, 0x66, 0x52, 0xf4, 0x44, 0xa3, 0x65, + 0x93, 0x58, 0x6e, 0x44, 0x1e, 0xab, 0x32, 0x0d, 0x4a, 0xd3, 0xf0, 0x34, 0x9e, 0xa1, 0xf2, 0x13, + 0xbb, 0x0e, 0x16, 0x7a, 0xcb, 0x25, 0x6a, 0xc0, 0x9a, 0x7d, 0xd0, 0xe9, 0x98, 0xb6, 0xad, 0xfd, + 0x51, 0x90, 0xd4, 0x6e, 0xbb, 0xd7, 0x3f, 0xe0, 0xa6, 0xf6, 0x77, 0xc9, 0x78, 0x01, 0x40, 0x00, + 0x95, 0xd6, 0x82, 0xfd, 0x1f, 0x2a, 0x04, 0xcf, 0x18, 0xff, 0xeb, 0x39, 0x0c, 0x73, 0x25, 0xc3, + 0x01, 0x03, 0xb4, 0x99, 0xba, 0x62, 0x82, 0xfb, 0x42, 0x39, 0x91, 0xe1, 0x18, 0xdf, 0x40, 0xd3, + 0x3e, 0xf7, 0x46, 0xca, 0xc6, 0xf1, 0x10, 0xd7, 0x1f, 0xc1, 0xfa, 0xc8, 0xc7, 0x41, 0x3f, 0x71, + 0xe4, 0xb2, 0xeb, 0x8d, 0xe3, 0xfd, 0x90, 0x67, 0xca, 0x79, 0x82, 0xa7, 0xc4, 0xc3, 0xaf, 0xcc, + 0x15, 0x21, 0x63, 0x15, 0x9e, 0xc2, 0x6a, 0x99, 0xcb, 0x4f, 0xc3, 0xc1, 0x58, 0x93, 0xf3, 0x43, + 0x5c, 0x18, 0x95, 0x40, 0x5e, 0x12, 0xbb, 0x9c, 0xb6, 0x5d, 0xde, 0x05, 0xae, 0x94, 0xd8, 0xc7, + 0x50, 0xa5, 0x20, 0x92, 0xd9, 0xbd, 0x14, 0x61, 0x2c, 0x34, 0x1e, 0x83, 0x2e, 0xed, 0x29, 0x29, + 0xb6, 0xe7, 0xcc, 0xc2, 0x33, 0x3f, 0xe2, 0xe2, 0xdb, 0x39, 0xf6, 0xd0, 0xfb, 0x05, 0x63, 0xfc, + 0x84, 0x8f, 0xa3, 0x0b, 0x47, 0xc8, 0x10, 0xc7, 0x94, 0xb5, 0x82, 0x1a, 0x99, 0x44, 0xc8, 0x67, + 0x77, 0x28, 0x0f, 0x97, 0xaf, 0x4e, 0x15, 0x7b, 0x4a, 0xcb, 0xe7, 0x3c, 0xf9, 0x64, 0xcd, 0xa7, + 0xc7, 0x22, 0x88, 0xd3, 0x90, 0x65, 0xb1, 0x47, 0xb0, 0x16, 0x28, 0xd7, 0xa8, 0x69, 0xeb, 0x3b, + 0x77, 0xb2, 0x29, 0x58, 0x15, 0x02, 0x4f, 0x0c, 0x8c, 0x5d, 0xb8, 0x96, 0x2a, 0x51, 0xf1, 0xc2, + 0x24, 0xca, 0x0f, 0x4a, 0xab, 0xf1, 0x12, 0x7b, 0x32, 0x7f, 0xce, 0x07, 0xd6, 0x05, 0x9f, 0x0e, + 0x94, 0x0b, 0x55, 0x17, 0x7c, 0x21, 0x2a, 0x6a, 0x67, 0x0e, 0x65, 0x39, 0x7b, 0x59, 0x0b, 0xca, + 0xb8, 
0x5d, 0x23, 0xb6, 0xb1, 0x34, 0x13, 0xb7, 0x97, 0x19, 0xc6, 0xa5, 0xbb, 0x85, 0xcf, 0x0b, + 0xec, 0x4b, 0x60, 0xf8, 0xb0, 0x1f, 0x21, 0x2f, 0xfb, 0x4b, 0x6a, 0xd5, 0x3b, 0x6c, 0x5b, 0x5b, + 0xee, 0x38, 0xe3, 0xd2, 0xb1, 0xfa, 0x49, 0x77, 0xff, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, + 0xc4, 0xd0, 0x63, 0xe9, 0x0d, 0x00, 0x00, } diff --git a/protos/fabric_next.pb.go b/protos/fabric_next.pb.go index 6754e4245f6..53d3c4eb8b4 100644 --- a/protos/fabric_next.pb.go +++ b/protos/fabric_next.pb.go @@ -7,7 +7,7 @@ package protos import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "google/protobuf" +import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" import ( context "golang.org/x/net/context" @@ -65,6 +65,7 @@ var Message2_Type_value = map[string]int32{ func (x Message2_Type) String() string { return proto.EnumName(Message2_Type_name, int32(x)) } +func (Message2_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{1, 0} } type Proposal_Type int32 @@ -87,6 +88,7 @@ var Proposal_Type_value = map[string]int32{ func (x Proposal_Type) String() string { return proto.EnumName(Proposal_Type_name, int32(x)) } +func (Proposal_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{2, 0} } type InvalidTransaction_Cause int32 @@ -107,6 +109,7 @@ var InvalidTransaction_Cause_value = map[string]int32{ func (x InvalidTransaction_Cause) String() string { return proto.EnumName(InvalidTransaction_Cause_name, int32(x)) } +func (InvalidTransaction_Cause) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{10, 0} } // Envelope is used to deliver a message type Envelope struct { @@ -116,9 +119,10 @@ type Envelope struct { Message *Message2 `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` } -func (m *Envelope) Reset() { *m = Envelope{} } -func (m *Envelope) String() string { return proto.CompactTextString(m) } -func (*Envelope) ProtoMessage() {} +func (m *Envelope) Reset() { *m = Envelope{} } +func (m *Envelope) String() string { return proto.CompactTextString(m) } +func (*Envelope) ProtoMessage() {} +func (*Envelope) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } func (m *Envelope) GetMessage() *Message2 { if m != nil { @@ -140,9 +144,10 @@ type Message2 struct { Payload []byte `protobuf:"bytes,4,opt,name=payload,proto3" json:"payload,omitempty"` } -func (m *Message2) Reset() { *m = Message2{} } -func (m *Message2) String() string { return proto.CompactTextString(m) } -func (*Message2) ProtoMessage() {} +func (m *Message2) Reset() { *m = Message2{} } +func (m *Message2) String() string { return proto.CompactTextString(m) } +func (*Message2) ProtoMessage() {} +func (*Message2) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } func (m *Message2) GetTimestamp() *google_protobuf.Timestamp { if m != nil { @@ -162,9 +167,10 @@ type Proposal struct { Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` } -func (m *Proposal) Reset() { *m = Proposal{} } -func (m *Proposal) String() string { return proto.CompactTextString(m) } -func (*Proposal) ProtoMessage() {} +func (m *Proposal) Reset() { *m = Proposal{} } +func (m *Proposal) String() string { return proto.CompactTextString(m) } +func (*Proposal) ProtoMessage() {} +func (*Proposal) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{2} } // A response with a representation similar to an HTTP response that can // be used within another message. 
@@ -177,9 +183,10 @@ type Response2 struct { Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` } -func (m *Response2) Reset() { *m = Response2{} } -func (m *Response2) String() string { return proto.CompactTextString(m) } -func (*Response2) ProtoMessage() {} +func (m *Response2) Reset() { *m = Response2{} } +func (m *Response2) String() string { return proto.CompactTextString(m) } +func (*Response2) ProtoMessage() {} +func (*Response2) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{3} } // A SystemChaincode is a chaincode compiled into the peer that cannot // be modified at runtime. These are used to perform critical system level @@ -189,9 +196,10 @@ type SystemChaincode struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` } -func (m *SystemChaincode) Reset() { *m = SystemChaincode{} } -func (m *SystemChaincode) String() string { return proto.CompactTextString(m) } -func (*SystemChaincode) ProtoMessage() {} +func (m *SystemChaincode) Reset() { *m = SystemChaincode{} } +func (m *SystemChaincode) String() string { return proto.CompactTextString(m) } +func (*SystemChaincode) ProtoMessage() {} +func (*SystemChaincode) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{4} } // An action to be taken against the ledger. type Action struct { @@ -217,9 +225,10 @@ type Action struct { Vscc *SystemChaincode `protobuf:"bytes,5,opt,name=vscc" json:"vscc,omitempty"` } -func (m *Action) Reset() { *m = Action{} } -func (m *Action) String() string { return proto.CompactTextString(m) } -func (*Action) ProtoMessage() {} +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} +func (*Action) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{5} } func (m *Action) GetEscc() *SystemChaincode { if m != nil { @@ -241,9 +250,10 @@ type Endorsement struct { Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` } -func (m *Endorsement) Reset() { *m = Endorsement{} } -func (m *Endorsement) String() string { return proto.CompactTextString(m) } -func (*Endorsement) ProtoMessage() {} +func (m *Endorsement) Reset() { *m = Endorsement{} } +func (m *Endorsement) String() string { return proto.CompactTextString(m) } +func (*Endorsement) ProtoMessage() {} +func (*Endorsement) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{6} } // A ProposalResponse is returned from an endorser to the proposal submitter. 
type ProposalResponse struct { @@ -257,9 +267,10 @@ type ProposalResponse struct { Endorsement *Endorsement `protobuf:"bytes,3,opt,name=endorsement" json:"endorsement,omitempty"` } -func (m *ProposalResponse) Reset() { *m = ProposalResponse{} } -func (m *ProposalResponse) String() string { return proto.CompactTextString(m) } -func (*ProposalResponse) ProtoMessage() {} +func (m *ProposalResponse) Reset() { *m = ProposalResponse{} } +func (m *ProposalResponse) String() string { return proto.CompactTextString(m) } +func (*ProposalResponse) ProtoMessage() {} +func (*ProposalResponse) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{7} } func (m *ProposalResponse) GetResponse() *Response2 { if m != nil { @@ -296,9 +307,10 @@ type EndorsedAction struct { ProposalBytes []byte `protobuf:"bytes,3,opt,name=proposalBytes,proto3" json:"proposalBytes,omitempty"` } -func (m *EndorsedAction) Reset() { *m = EndorsedAction{} } -func (m *EndorsedAction) String() string { return proto.CompactTextString(m) } -func (*EndorsedAction) ProtoMessage() {} +func (m *EndorsedAction) Reset() { *m = EndorsedAction{} } +func (m *EndorsedAction) String() string { return proto.CompactTextString(m) } +func (*EndorsedAction) ProtoMessage() {} +func (*EndorsedAction) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{8} } func (m *EndorsedAction) GetEndorsements() []*Endorsement { if m != nil { @@ -315,9 +327,10 @@ type Transaction2 struct { EndorsedActions []*EndorsedAction `protobuf:"bytes,1,rep,name=endorsedActions" json:"endorsedActions,omitempty"` } -func (m *Transaction2) Reset() { *m = Transaction2{} } -func (m *Transaction2) String() string { return proto.CompactTextString(m) } -func (*Transaction2) ProtoMessage() {} +func (m *Transaction2) Reset() { *m = Transaction2{} } +func (m *Transaction2) String() string { return proto.CompactTextString(m) } +func (*Transaction2) ProtoMessage() {} +func (*Transaction2) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{9} } func (m *Transaction2) GetEndorsedActions() []*EndorsedAction { if m != nil { @@ -332,9 +345,10 @@ type InvalidTransaction struct { Cause InvalidTransaction_Cause `protobuf:"varint,2,opt,name=cause,enum=protos.InvalidTransaction_Cause" json:"cause,omitempty"` } -func (m *InvalidTransaction) Reset() { *m = InvalidTransaction{} } -func (m *InvalidTransaction) String() string { return proto.CompactTextString(m) } -func (*InvalidTransaction) ProtoMessage() {} +func (m *InvalidTransaction) Reset() { *m = InvalidTransaction{} } +func (m *InvalidTransaction) String() string { return proto.CompactTextString(m) } +func (*InvalidTransaction) ProtoMessage() {} +func (*InvalidTransaction) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{10} } func (m *InvalidTransaction) GetTransaction() *Transaction2 { if m != nil { @@ -345,16 +359,29 @@ func (m *InvalidTransaction) GetTransaction() *Transaction2 { // Block contains a list of transactions and the crypto hash of previous block type Block2 struct { - PreviousBlockHash []byte `protobuf:"bytes,1,opt,name=PreviousBlockHash,proto3" json:"PreviousBlockHash,omitempty"` + PreviousBlockHash []byte `protobuf:"bytes,1,opt,name=PreviousBlockHash,json=previousBlockHash,proto3" json:"PreviousBlockHash,omitempty"` // transactions are stored in serialized form so that the concenters can avoid marshaling of transactions - Transactions [][]byte `protobuf:"bytes,2,rep,name=Transactions,proto3" json:"Transactions,omitempty"` + Transactions [][]byte 
`protobuf:"bytes,2,rep,name=Transactions,json=transactions,proto3" json:"Transactions,omitempty"` } -func (m *Block2) Reset() { *m = Block2{} } -func (m *Block2) String() string { return proto.CompactTextString(m) } -func (*Block2) ProtoMessage() {} +func (m *Block2) Reset() { *m = Block2{} } +func (m *Block2) String() string { return proto.CompactTextString(m) } +func (*Block2) ProtoMessage() {} +func (*Block2) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{11} } func init() { + proto.RegisterType((*Envelope)(nil), "protos.Envelope") + proto.RegisterType((*Message2)(nil), "protos.Message2") + proto.RegisterType((*Proposal)(nil), "protos.Proposal") + proto.RegisterType((*Response2)(nil), "protos.Response2") + proto.RegisterType((*SystemChaincode)(nil), "protos.SystemChaincode") + proto.RegisterType((*Action)(nil), "protos.Action") + proto.RegisterType((*Endorsement)(nil), "protos.Endorsement") + proto.RegisterType((*ProposalResponse)(nil), "protos.ProposalResponse") + proto.RegisterType((*EndorsedAction)(nil), "protos.EndorsedAction") + proto.RegisterType((*Transaction2)(nil), "protos.Transaction2") + proto.RegisterType((*InvalidTransaction)(nil), "protos.InvalidTransaction") + proto.RegisterType((*Block2)(nil), "protos.Block2") proto.RegisterEnum("protos.Message2_Type", Message2_Type_name, Message2_Type_value) proto.RegisterEnum("protos.Proposal_Type", Proposal_Type_name, Proposal_Type_value) proto.RegisterEnum("protos.InvalidTransaction_Cause", InvalidTransaction_Cause_name, InvalidTransaction_Cause_value) @@ -364,6 +391,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + // Client API for Endorser service type EndorserClient interface { @@ -397,16 +428,22 @@ func RegisterEndorserServer(s *grpc.Server, srv EndorserServer) { s.RegisterService(&_Endorser_serviceDesc, srv) } -func _Endorser_ProcessProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Endorser_ProcessProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Proposal) if err := dec(in); err != nil { return nil, err } - out, err := srv.(EndorserServer).ProcessProposal(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(EndorserServer).ProcessProposal(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Endorser/ProcessProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EndorserServer).ProcessProposal(ctx, req.(*Proposal)) + } + return interceptor(ctx, in, info, handler) } var _Endorser_serviceDesc = grpc.ServiceDesc{ @@ -418,5 +455,63 @@ var _Endorser_serviceDesc = grpc.ServiceDesc{ Handler: _Endorser_ProcessProposal_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor5, +} + +func init() { proto.RegisterFile("fabric_next.proto", fileDescriptor5) } + +var fileDescriptor5 = []byte{ + // 811 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x55, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x0e, 0xad, 0x1f, 0x4b, 0x43, 0xc5, 0xa6, 0xd6, 0x8d, 0x23, 0x18, 0x05, 0xea, 0x12, 0x39, + 0xb4, 0x4d, 0xab, 0x00, 0x2c, 0xfa, 0x83, 0x5e, 0x5a, 
0x85, 0x62, 0x11, 0xa1, 0xa9, 0x64, 0xac, + 0x98, 0x06, 0xe9, 0x25, 0xa0, 0xc9, 0xb5, 0x42, 0x94, 0xe2, 0x12, 0xdc, 0xa5, 0x60, 0x3d, 0x40, + 0x8f, 0x3d, 0xf4, 0xda, 0xd7, 0xe8, 0x0b, 0xf4, 0xdc, 0xa7, 0xea, 0xee, 0x92, 0x2b, 0x51, 0x34, + 0x9c, 0x9e, 0xa4, 0x99, 0xf9, 0x76, 0xbe, 0x6f, 0x66, 0x76, 0x96, 0x30, 0xbc, 0x09, 0xae, 0xf3, + 0x38, 0x7c, 0x9b, 0x92, 0x5b, 0x3e, 0xce, 0x72, 0xca, 0x29, 0xea, 0xaa, 0x1f, 0x76, 0xf1, 0xd1, + 0x8a, 0xd2, 0x55, 0x42, 0x9e, 0x29, 0xf3, 0xba, 0xb8, 0x79, 0xc6, 0xe3, 0x35, 0x61, 0x3c, 0x58, + 0x67, 0x25, 0xd0, 0xf6, 0xa1, 0xe7, 0xa5, 0x1b, 0x92, 0xd0, 0x8c, 0xa0, 0x0f, 0xa1, 0xcf, 0xe2, + 0x55, 0x1a, 0xf0, 0x22, 0x27, 0x23, 0xe3, 0xd2, 0xf8, 0x64, 0x80, 0xf7, 0x0e, 0xf4, 0x19, 0x1c, + 0x8b, 0xa3, 0x2c, 0x58, 0x91, 0xd1, 0x91, 0x88, 0x99, 0x8e, 0x55, 0xa6, 0x60, 0xe3, 0x9f, 0x4b, + 0xb7, 0x83, 0x35, 0xc0, 0xfe, 0xfb, 0x08, 0x7a, 0xda, 0x8b, 0x3e, 0x85, 0x36, 0xdf, 0x66, 0x65, + 0xc6, 0x13, 0xe7, 0x51, 0xf3, 0xd4, 0xd8, 0x17, 0x41, 0xac, 0x20, 0x68, 0x04, 0xc7, 0x1b, 0x92, + 0xb3, 0x98, 0xa6, 0x8a, 0xa3, 0x83, 0xb5, 0x89, 0xbe, 0x85, 0xfe, 0x4e, 0xfa, 0xa8, 0xa5, 0xf8, + 0x2f, 0xc6, 0x65, 0x71, 0x63, 0x5d, 0xdc, 0xd8, 0xd7, 0x08, 0xbc, 0x07, 0xcb, 0x9c, 0x59, 0xb0, + 0x4d, 0x68, 0x10, 0x8d, 0xda, 0xaa, 0x26, 0x6d, 0xda, 0x7f, 0x18, 0xd0, 0x96, 0xe4, 0xe8, 0x21, + 0xf4, 0x5f, 0xcd, 0xa7, 0xde, 0x8f, 0xb3, 0xb9, 0x37, 0xb5, 0x1e, 0x48, 0x73, 0x3a, 0x5b, 0xba, + 0x8b, 0x5f, 0x3c, 0xfc, 0xc6, 0x32, 0x50, 0x0f, 0xda, 0xcb, 0x37, 0x73, 0xd7, 0x3a, 0x42, 0x03, + 0xe8, 0x5d, 0xe1, 0xc5, 0xd5, 0x62, 0x39, 0x79, 0x69, 0xb5, 0x90, 0x05, 0x03, 0x6d, 0xbd, 0x5d, + 0x7a, 0xbe, 0xd5, 0x46, 0x67, 0x70, 0xba, 0xf3, 0x60, 0x6f, 0xf9, 0xea, 0xa5, 0x6f, 0x75, 0xd0, + 0x63, 0x38, 0xab, 0xc3, 0x74, 0xa0, 0x8b, 0x4e, 0xc1, 0xf4, 0xf1, 0x64, 0xbe, 0x9c, 0xb8, 0xfe, + 0x6c, 0x31, 0xb7, 0x8e, 0xed, 0xdf, 0x0d, 0x91, 0x3f, 0xa7, 0x19, 0x65, 0x41, 0x72, 0x5f, 0xd7, + 0x74, 0xbc, 0xde, 0xb5, 0x13, 0x38, 0x8a, 0x23, 0xd5, 0xb0, 0x3e, 0x16, 0xff, 0xea, 0x15, 0xb7, + 0x0e, 0x2b, 0x7e, 0x72, 0x6f, 0xc1, 0xee, 0x8b, 0xc9, 0x6c, 0xee, 0x2e, 0xa6, 0x9e, 0x65, 0xd8, + 0xaf, 0xa1, 0x8f, 0x09, 0xcb, 0x68, 0xca, 0xc4, 0xf4, 0xce, 0xa1, 0x2b, 0xfa, 0xc8, 0x0b, 0xa6, + 0x94, 0x74, 0x70, 0x65, 0x49, 0x92, 0xfa, 0x75, 0xe8, 0xef, 0x86, 0xff, 0x1e, 0xfa, 0x8f, 0xe1, + 0x74, 0xb9, 0x65, 0x9c, 0xac, 0xdd, 0x77, 0x41, 0x9c, 0x86, 0x34, 0xd2, 0xda, 0x0d, 0xad, 0xdd, + 0xfe, 0xd7, 0x80, 0xee, 0x24, 0xe4, 0x72, 0xe4, 0x36, 0x0c, 0xb2, 0xaa, 0xda, 0x17, 0x01, 0x7b, + 0x57, 0xdd, 0xc8, 0x03, 0x9f, 0xb8, 0x94, 0x16, 0x8b, 0xd7, 0x45, 0x12, 0xc8, 0x13, 0x42, 0x74, + 0x91, 0x70, 0x25, 0x67, 0x80, 0xef, 0xf8, 0x65, 0x25, 0x64, 0x43, 0x52, 0xce, 0x84, 0xac, 0x96, + 0x40, 0x54, 0x16, 0x7a, 0x0a, 0x6d, 0xc2, 0xc2, 0x50, 0xdd, 0x0e, 0xd3, 0x79, 0xac, 0x3b, 0xdd, + 0x50, 0x8a, 0x15, 0x48, 0x82, 0x37, 0x12, 0xdc, 0xf9, 0x1f, 0xb0, 0x04, 0xd9, 0x4f, 0xc1, 0xf4, + 0xd2, 0x88, 0xe6, 0x8c, 0xac, 0x05, 0xd3, 0xfb, 0xf7, 0xcb, 0xfe, 0xcb, 0x00, 0x4b, 0x4f, 0x57, + 0xb7, 0x1f, 0x7d, 0x01, 0xbd, 0xbc, 0xfa, 0xaf, 0x4e, 0x98, 0xce, 0x50, 0x53, 0xee, 0x46, 0x84, + 0x77, 0x10, 0x74, 0x09, 0x66, 0xa0, 0x9a, 0xf7, 0x7c, 0xcb, 0x09, 0xab, 0x3a, 0x51, 0x77, 0xa1, + 0xaf, 0xc0, 0x24, 0x7b, 0x49, 0xd5, 0x26, 0x9d, 0xe9, 0x9c, 0x35, 0xb5, 0xb8, 0x8e, 0xb3, 0xff, + 0x34, 0xe0, 0xa4, 0x0a, 0x46, 0xd5, 0x78, 0x1a, 0x5c, 0xc6, 0x5d, 0xae, 0x6f, 0x60, 0x50, 0xcb, + 0x21, 0xe5, 0xb4, 0xee, 0x23, 0x3b, 0x00, 0xa2, 0x27, 0xf0, 0x50, 0x4f, 0xb9, 0x4c, 0x5e, 0xde, + 0xa3, 0x43, 0xa7, 0x7d, 0x05, 0x03, 0x3f, 0x0f, 0x52, 0x56, 0x52, 0x3a, 0xe8, 
0x07, 0x38, 0x25, + 0x07, 0x12, 0xa5, 0x28, 0xc9, 0x78, 0xde, 0x60, 0xac, 0xc2, 0xb8, 0x09, 0xb7, 0xff, 0x31, 0x00, + 0xcd, 0xd2, 0x4d, 0x90, 0xc4, 0x51, 0x2d, 0x33, 0xfa, 0x1a, 0x4c, 0xbe, 0x37, 0xab, 0x39, 0x7c, + 0xa0, 0x93, 0xd6, 0x35, 0xe0, 0x3a, 0x50, 0x9c, 0xeb, 0x84, 0x41, 0xc1, 0xca, 0x05, 0x39, 0x71, + 0x2e, 0xf5, 0x89, 0xbb, 0x14, 0x63, 0x57, 0xe2, 0x70, 0x09, 0xb7, 0xbf, 0x83, 0x8e, 0xb2, 0xd1, + 0x23, 0x18, 0xfa, 0xb7, 0xb3, 0x68, 0x92, 0xe4, 0x24, 0x88, 0xb6, 0xde, 0x6d, 0xcc, 0x38, 0x13, + 0xeb, 0x7a, 0x01, 0xe7, 0xf8, 0xb5, 0x4b, 0xd3, 0x9b, 0x24, 0x0e, 0xf9, 0xb4, 0xc8, 0xe3, 0x74, + 0xe5, 0xd2, 0xf5, 0x3a, 0xe6, 0x62, 0x77, 0x7f, 0x85, 0xee, 0xf3, 0x84, 0x86, 0xbf, 0x39, 0xe8, + 0x73, 0x18, 0x5e, 0xe5, 0x64, 0x13, 0xd3, 0x82, 0x29, 0x4f, 0x6d, 0x87, 0x86, 0x59, 0x33, 0x20, + 0x97, 0xad, 0xa6, 0xa7, 0x9c, 0x95, 0x58, 0xb6, 0x5a, 0x39, 0xcc, 0xf9, 0x49, 0x7e, 0x2b, 0x54, + 0xc7, 0x72, 0xf4, 0xbd, 0x78, 0xea, 0x72, 0x1a, 0x8a, 0x95, 0xdf, 0xbd, 0x58, 0x56, 0xf3, 0x8d, + 0xba, 0x18, 0x35, 0x3d, 0xfa, 0xce, 0xda, 0x0f, 0xae, 0xcb, 0x2f, 0xd4, 0x97, 0xff, 0x05, 0x00, + 0x00, 0xff, 0xff, 0x1e, 0x94, 0x2e, 0x4c, 0xbd, 0x06, 0x00, 0x00, } diff --git a/protos/server_admin.pb.go b/protos/server_admin.pb.go index 56225e7976f..2e7d1db9b48 100644 --- a/protos/server_admin.pb.go +++ b/protos/server_admin.pb.go @@ -7,7 +7,7 @@ package protos import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf1 "google/protobuf" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" import ( context "golang.org/x/net/context" @@ -50,16 +50,19 @@ var ServerStatus_StatusCode_value = map[string]int32{ func (x ServerStatus_StatusCode) String() string { return proto.EnumName(ServerStatus_StatusCode_name, int32(x)) } +func (ServerStatus_StatusCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor7, []int{0, 0} } type ServerStatus struct { Status ServerStatus_StatusCode `protobuf:"varint,1,opt,name=status,enum=protos.ServerStatus_StatusCode" json:"status,omitempty"` } -func (m *ServerStatus) Reset() { *m = ServerStatus{} } -func (m *ServerStatus) String() string { return proto.CompactTextString(m) } -func (*ServerStatus) ProtoMessage() {} +func (m *ServerStatus) Reset() { *m = ServerStatus{} } +func (m *ServerStatus) String() string { return proto.CompactTextString(m) } +func (*ServerStatus) ProtoMessage() {} +func (*ServerStatus) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} } func init() { + proto.RegisterType((*ServerStatus)(nil), "protos.ServerStatus") proto.RegisterEnum("protos.ServerStatus_StatusCode", ServerStatus_StatusCode_name, ServerStatus_StatusCode_value) } @@ -67,6 +70,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion3 + // Client API for Admin service type AdminClient interface { @@ -124,40 +131,58 @@ func RegisterAdminServer(s *grpc.Server, srv AdminServer) { s.RegisterService(&_Admin_serviceDesc, srv) } -func _Admin_GetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Admin_GetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(google_protobuf1.Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(AdminServer).GetStatus(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(AdminServer).GetStatus(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Admin/GetStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServer).GetStatus(ctx, req.(*google_protobuf1.Empty)) + } + return interceptor(ctx, in, info, handler) } -func _Admin_StartServer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Admin_StartServer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(google_protobuf1.Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(AdminServer).StartServer(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(AdminServer).StartServer(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Admin/StartServer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServer).StartServer(ctx, req.(*google_protobuf1.Empty)) + } + return interceptor(ctx, in, info, handler) } -func _Admin_StopServer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Admin_StopServer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(google_protobuf1.Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(AdminServer).StopServer(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(AdminServer).StopServer(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.Admin/StopServer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServer).StopServer(ctx, req.(*google_protobuf1.Empty)) + } + return interceptor(ctx, in, info, handler) } var _Admin_serviceDesc = grpc.ServiceDesc{ @@ -177,5 +202,28 @@ var _Admin_serviceDesc = grpc.ServiceDesc{ Handler: _Admin_StopServer_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor7, +} + +func init() { proto.RegisterFile("server_admin.proto", fileDescriptor7) } + +var fileDescriptor7 = []byte{ + // 248 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x4e, 0x2d, 0x2a, + 0x4b, 0x2d, 0x8a, 0x4f, 0x4c, 0xc9, 0xcd, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, + 0x03, 0x53, 0xc5, 0x52, 0xd2, 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0x6e, 0x52, 0x69, + 0x9a, 0x7e, 0x6a, 0x6e, 0x41, 0x49, 0x25, 0x44, 0x91, 0xd2, 0x22, 0x46, 0x2e, 
0x9e, 0x60, 0xb0, + 0xde, 0xe0, 0x92, 0xc4, 0x92, 0xd2, 0x62, 0x21, 0x73, 0x2e, 0xb6, 0x62, 0x30, 0x4b, 0x82, 0x51, + 0x81, 0x51, 0x83, 0xcf, 0x48, 0x1e, 0xa2, 0xb0, 0x58, 0x0f, 0x59, 0x95, 0x1e, 0x84, 0x72, 0xce, + 0x4f, 0x49, 0x0d, 0x82, 0x2a, 0x57, 0x8a, 0xe4, 0xe2, 0x42, 0x88, 0x0a, 0xf1, 0x72, 0x71, 0x86, + 0xfa, 0xb9, 0xb8, 0xba, 0x79, 0xfa, 0xb9, 0xba, 0x08, 0x30, 0x08, 0x71, 0x73, 0xb1, 0x07, 0x87, + 0x38, 0x06, 0x85, 0x00, 0x39, 0x8c, 0x10, 0x8e, 0x7f, 0x40, 0x00, 0x90, 0xc3, 0x24, 0xc4, 0xc5, + 0xc5, 0x16, 0xe0, 0x18, 0x1a, 0x0c, 0x64, 0x33, 0x0b, 0x71, 0x72, 0xb1, 0xba, 0x06, 0x05, 0xf9, + 0x07, 0x09, 0xb0, 0x80, 0xd4, 0x84, 0xfa, 0x79, 0xfb, 0xf9, 0x87, 0xfb, 0x09, 0xb0, 0x1a, 0x1d, + 0x64, 0xe4, 0x62, 0x75, 0x04, 0xf9, 0x4c, 0xc8, 0x9a, 0x8b, 0xd3, 0x3d, 0xb5, 0x04, 0xea, 0x54, + 0x31, 0x3d, 0x88, 0xcf, 0xf4, 0x60, 0x3e, 0xd3, 0x73, 0x05, 0xf9, 0x4c, 0x4a, 0x04, 0x9b, 0x93, + 0x95, 0x18, 0x84, 0x6c, 0xb9, 0xb8, 0x81, 0xec, 0xa2, 0x12, 0x88, 0x30, 0xc9, 0xda, 0x6d, 0x40, + 0x1e, 0xcc, 0x2f, 0x20, 0x4f, 0x77, 0x12, 0x24, 0x36, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x5b, 0x2d, 0xff, 0xc2, 0xaa, 0x01, 0x00, 0x00, } diff --git a/protos/transaction.go b/protos/transaction.go index 49360800b79..2d627734992 100644 --- a/protos/transaction.go +++ b/protos/transaction.go @@ -17,6 +17,7 @@ limitations under the License. package protos import ( + "encoding/json" "fmt" "github.com/golang/protobuf/proto" @@ -111,3 +112,24 @@ func NewChaincodeExecute(chaincodeInvocationSpec *ChaincodeInvocationSpec, uuid transaction.Payload = data return transaction, nil } + +type strArgs struct { + Function string + Args []string +} + +// UnmarshalJSON converts the string-based REST/JSON input to +// the []byte-based current ChaincodeInput structure. +func (c *ChaincodeInput) UnmarshalJSON(b []byte) error { + sa := &strArgs{} + err := json.Unmarshal(b, sa) + if err != nil { + return err + } + allArgs := sa.Args + if sa.Function != "" { + allArgs = append([]string{sa.Function}, sa.Args...) + } + c.Args = util.ToChaincodeArgs(allArgs...) + return nil +} diff --git a/scripts/goListFiles.sh b/scripts/goListFiles.sh new file mode 100755 index 00000000000..13730e7878d --- /dev/null +++ b/scripts/goListFiles.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +target=$1 + +deps=`go list -f '{{ join .Deps "\n" }}' $target` + +find $GOPATH/src/$target -type f + +for dep in $deps; +do + importpath=$GOPATH/src/$dep + if [ -d $importpath ]; + then + find $importpath -type f + fi +done diff --git a/scripts/goUnitTests.sh b/scripts/goUnitTests.sh index bf2f7def2c6..5b4a05b01c7 100755 --- a/scripts/goUnitTests.sh +++ b/scripts/goUnitTests.sh @@ -2,15 +2,33 @@ set -e +TAG=$1 +GO_LDFLAGS=$2 + +BASEIMAGE="hyperledger/fabric-peer" +IMAGE=$BASEIMAGE + +if [ "$TAG" != "" ] +then + IMAGE="$BASEIMAGE:$TAG" +fi + +echo "Running unit tests using $IMAGE" + echo "Cleaning membership services folder" rm -rf membersrvc/ca/.ca/ echo -n "Obtaining list of tests to run.." -PKGS=`go list github.com/hyperledger/fabric/... | grep -v /vendor/ | grep -v /examples/` +# Some examples don't play nice with `go test` +PKGS=`go list github.com/hyperledger/fabric/... | grep -v /vendor/ | \ + grep -v /examples/chaincode/chaintool/ | \ + grep -v /examples/chaincode/go/asset_management | \ + grep -v /examples/chaincode/go/utxo | \ + grep -v /examples/chaincode/go/rbac_tcerts_no_attrs` echo "DONE!" echo -n "Starting peer.." 
-CID=`docker run -dit -p 7051:7051 hyperledger/fabric-peer peer node start` +CID=`docker run -dit -p 7051:7051 $IMAGE peer node start` cleanup() { echo "Stopping peer.." docker kill $CID 2>&1 > /dev/null @@ -19,4 +37,4 @@ trap cleanup 0 echo "DONE!" echo "Running tests..." -gocov test $PKGS -p 1 -timeout=20m | gocov-xml > report.xml +gocov test -ldflags "$GO_LDFLAGS" $PKGS -p 1 -timeout=20m | gocov-xml > report.xml diff --git a/scripts/provision/common.sh b/scripts/provision/common.sh deleted file mode 100755 index 0efe7e63570..00000000000 --- a/scripts/provision/common.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -# Add any logic that is common to both the peer and docker environments here - -apt-get update -qq - -# Used by CHAINTOOL -apt-get install -y default-jre diff --git a/scripts/provision/docker.sh b/scripts/provision/docker.sh deleted file mode 100755 index 8ed21a7ca14..00000000000 --- a/scripts/provision/docker.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash - -# --------------------------------------------------------------------------- -# Install the hyperledger/fabric-baseimage docker environment -# --------------------------------------------------------------------------- -# -# There are some interesting things to note here: -# -# 1) Note that we take the slightly unorthodox route of _not_ publishing -# a "latest" tag to dockerhub. Rather, we only publish specifically -# versioned images and we build the notion of "latest" here locally -# during provisioning. This is because the notion of always -# pulling the latest/greatest from the net doesn't really apply to us; -# we always want a coupling between the fabric and the docker environment. -# At the same time, requiring each and every Dockerfile to pull a specific -# version adds overhead to the Dockerfile generation logic. Therefore, -# we employ a hybrid solution that capitalizes on how docker treats the -# "latest" tag. That is, untagged references implicitly assume the tag -# "latest" (good for simple Dockerfiles), but will satisfy the tag from -# the local cache before going to the net (good for helping us control -# what "latest" means locally) -# -# A good blog entry covering the mechanism being exploited may be found here: -# -# http://container-solutions.com/docker-latest-confusion -# -# 2) A benefit of (1) is that we now have a convenient vehicle for performing -# JIT customizations of our docker image during provisioning just like we -# do for vagrant. For example, we can install new packages in docker within -# this script. We will capitalize on this in future patches. -# -# 3) Note that we do some funky processing of the environment (see "printenv" -# and "ENV" components below). Whats happening is we are providing a vehicle -# for allowing the baseimage to include environmental definitions using -# standard linux mechanisms (e.g. /etc/profile.d). The problem is that -# docker-run by default runs a non-login/non-interactive /bin/dash shell -# which omits any normal /etc/profile or ~/.bashrc type processing, including -# environment variable definitions. So what we do is we force the execution -# of an interactive shell and extract the defined environment variables -# (via "printenv") and then re-inject them (using Dockerfile::ENV) in a -# manner that will make them visible to a non-interactive DASH shell. -# -# This helps for things like defining things such as the GOPATH. 
-# -# An alternative would be to bake any Dockerfile::ENV items in during -# baseimage creation, but packer lacks the capability to do so, so this -# is a compromise. -# --------------------------------------------------------------------------- - -NAME=hyperledger/fabric-baseimage -RELEASE=`uname -m`-$1 -DOCKERHUB_NAME=$NAME:$RELEASE - -CURDIR=`dirname $0` - -docker inspect $DOCKERHUB_NAME 2>&1 > /dev/null -if [ "$?" == "0" ]; then - echo "BUILD-CACHE: exists!" - BASENAME=$DOCKERHUB_NAME -else - echo "BUILD-CACHE: Pulling \"$DOCKERHUB_NAME\" from dockerhub.." - docker pull $DOCKERHUB_NAME - docker inspect $DOCKERHUB_NAME 2>&1 > /dev/null - if [ "$?" == "0" ]; then - echo "BUILD-CACHE: Success!" - BASENAME=$DOCKERHUB_NAME - else - echo "BUILD-CACHE: WARNING - Build-cache unavailable, attempting local build" - (cd $CURDIR/../../images/base && make docker DOCKER_TAG=localbuild) - if [ "$?" != "0" ]; then - echo "ERROR: Build-cache could not be compiled locally" - exit -1 - fi - BASENAME=$NAME:localbuild - fi -fi - -# Ensure that we have the baseimage we are expecting -docker inspect $BASENAME 2>&1 > /dev/null -if [ "$?" != "0" ]; then - echo "ERROR: Unable to obtain a baseimage" - exit -1 -fi - -# any further errors should be fatal -set -e - -TMP=`mktemp -d` -DOCKERFILE=$TMP/Dockerfile - -LOCALSCRIPTS=$TMP/scripts -REMOTESCRIPTS=/hyperledger/scripts/provision - -mkdir -p $LOCALSCRIPTS -cp -R $CURDIR/* $LOCALSCRIPTS - -# extract the FQN environment and run our common.sh to create the :latest tag -cat < $DOCKERFILE -FROM $BASENAME -`for i in \`docker run -i $BASENAME /bin/bash -l -c printenv\`; -do - echo ENV $i -done` -COPY scripts $REMOTESCRIPTS -RUN $REMOTESCRIPTS/common.sh -RUN chmod a+rw -R /opt/gopath - -EOF - -[ ! -z "$http_proxy" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg http_proxy=$http_proxy" -[ ! -z "$https_proxy" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg https_proxy=$https_proxy" -[ ! -z "$HTTP_PROXY" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg HTTP_PROXY=$HTTP_PROXY" -[ ! -z "$HTTPS_PROXY" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg HTTPS_PROXY=$HTTPS_PROXY" -[ ! -z "$no_proxy" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg no_proxy=$no_proxy" -[ ! 
-z "$NO_PROXY" ] && DOCKER_ARGS_PROXY="$DOCKER_ARGS_PROXY --build-arg NO_PROXY=$NO_PROXY" -docker build $DOCKER_ARGS_PROXY -t $NAME:latest $TMP - -rm -rf $TMP diff --git a/scripts/provision/host.sh b/scripts/provision/host.sh deleted file mode 100755 index 41e3168521b..00000000000 --- a/scripts/provision/host.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -CURDIR=`dirname $0` - -$CURDIR/common.sh - -# Install docker-compose -curl -L https://github.com/docker/compose/releases/download/1.5.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose -chmod +x /usr/local/bin/docker-compose - -# Install Python, pip, behave, nose -# -# install python-dev and libyaml-dev to get compiled speedups -apt-get install --yes python-dev -apt-get install --yes libyaml-dev - -apt-get install --yes python-setuptools -apt-get install --yes python-pip -pip install --upgrade pip -pip install behave -pip install nose - -# updater-server, update-engine, and update-service-common dependencies (for running locally) -pip install -I flask==0.10.1 python-dateutil==2.2 pytz==2014.3 pyyaml==3.10 couchdb==1.0 flask-cors==2.0.1 requests==2.4.3 - -# Python grpc package for behave tests -# Required to update six for grpcio -pip install --ignore-installed six -pip install --upgrade 'grpcio==1.0.0' - -# install ruby and apiaryio -#apt-get install --yes ruby ruby-dev gcc -#gem install apiaryio - -# Install Tcl prerequisites for busywork -apt-get install --yes tcl tclx tcllib - -# Install NPM for the SDK -apt-get install --yes npm - -# Install JDK 1.8 for Java chaincode development -add-apt-repository ppa:openjdk-r/ppa -y -apt-get update && apt-get install openjdk-8-jdk -y - -# Download Gradle and create sym link -wget https://services.gradle.org/distributions/gradle-2.12-bin.zip -P /tmp --quiet -unzip -q /tmp/gradle-2.12-bin.zip -d /opt && rm /tmp/gradle-2.12-bin.zip -ln -s /opt/gradle-2.12/bin/gradle /usr/bin - -# Set the default JDK to 1.8 -update-java-alternatives -s java-1.8.0-openjdk-amd64 diff --git a/sdk/node/Makefile b/sdk/node/Makefile index b644e069bda..7637d6912ab 100644 --- a/sdk/node/Makefile +++ b/sdk/node/Makefile @@ -16,22 +16,67 @@ # under the License. # +BINDIR := node_modules/.bin +TYPINGS := $(BINDIR)/typings +TSC := $(BINDIR)/tsc +TYPEDOC := $(BINDIR)/typedoc + +DOCURI := "https://github.com/hyperledger/fabric/tree/master/sdk/node/" + +SRC := $(shell find src -type f) +CONFIG := $(shell ls *.json) + EXECUTABLES = npm K := $(foreach exec,$(EXECUTABLES),\ $(if $(shell which $(exec)),some string,$(error "No $(exec) in PATH: Check dependencies"))) -all: hfc +# npm tool->pkg mapping +npm.pkg.typings := typings +npm.pkg.tsc := typescript +npm.pkg.typedoc := typedoc + +all: lib doc + +.PHONY: lib doc +lib: lib/hfc.js +doc: doc/index.html + +.SECONDARY: $(TYPINGS) $(TSC) $(TYPEDOC) +$(BINDIR)/%: $(CONFIG) + @echo "[INSTALL] $@" + npm install $(npm.pkg.$(@F)) + @touch $@ + +doc/index.html: $(TYPEDOC) $(SRC) $(CONFIG) typedoc-special.d.ts Makefile + @echo "[BUILD] SDK documentation" + @-rm -rf $(@D) + @mkdir -p $(@D) + $(TYPEDOC) -m amd \ + --name 'Node.js Hyperledger Fabric SDK' \ + --includeDeclarations \ + --excludeExternals \ + --excludeNotExported \ + --out $(@D) \ + src/hfc.ts typedoc-special.d.ts + # Typedoc generates links to working GIT repo which is fixed + # below to use an official release URI. + find $(@D) -name '*.html' -exec sed -i 's!href="http.*sdk/node/!href="'$(DOCURI)'!' 
{} \; -.PHONY: hfc -hfc: - mkdir -p ./lib/protos - cp ../../protos/*.proto ./lib/protos - cp ../../membersrvc/protos/*.proto ./lib/protos - npm install && sudo npm install -g typescript && sudo npm install typings --global && typings install - tsc - ./makedoc.sh +lib/hfc.js: $(TYPINGS) $(TSC) $(SRC) $(CONFIG) Makefile + @echo "[BUILD] SDK" + @mkdir -p ./lib/protos + @cp ../../protos/*.proto ./lib/protos + @cp ../../membersrvc/protos/*.proto ./lib/protos + npm install + $(TYPINGS) install + $(TSC) -unit-tests: hfc +unit-tests: lib + @echo "[RUN] unit tests..." @./bin/run-unit-tests.sh clean: + @echo "[CLEAN]" + -rm -rf node_modules + -rm -rf doc + -find lib -type f | grep -v "protos/google" | grep -v "hash.js" | xargs rm diff --git a/sdk/node/README.md b/sdk/node/README.md index bc127842ed1..81b518688b9 100644 --- a/sdk/node/README.md +++ b/sdk/node/README.md @@ -1,52 +1,99 @@ -## Hyperledger Fabric Client SDK for Node.js +# Hyperledger Fabric Client SDK for Node.js -The Hyperledger Fabric Client SDK (HFC) provides a powerful and easy to use API to interact with a Hyperledger Fabric blockchain. +## Overview -To learn about how to install and use the Node.js SDK, please visit the [fabric documentation](http://hyperledger-fabric.readthedocs.io/en/latest/Setup/NodeSDK-setup). +The Hyperledger Fabric Client SDK (HFC) provides a powerful and easy to use API to interact with a [Hyperledger Fabric](https://www.hyperledger.org/) blockchain from a Node.js application. It provides a set of APIs to register and enroll new network clients, to deploy new chaincodes to the network, and to interact with existing chaincodes through chaincode function invocations and queries. -The [Going Deeper](#going-deeper) section discusses HFC's pluggability and extensibility design. It also describes the main object hierarchy to help you get started in navigating the reference documentation. The top-level class is `Chain`. +To view the complete Node.js SDK documentation, additional usage samples, and getting started materials please visit the Hyperledger Fabric [SDK documentation page](http://hyperledger-fabric.readthedocs.io/en/latest/nodeSDK/node-sdk-guide/). -The [Future Work](#future-work) section describes some upcoming work to be done. +## Installation - **WARNING**: To view the reference documentation correctly, you first need to build the SDK and confirm functionality by [running the unit tests](#running-unit-tests). Subsequently open the following URLs directly in your browser. Be sure to replace YOUR-FABRIC-DIR with the path to your fabric directory. +If you are interfacing with a network running the Hyperledger Fabric [`v0.5-developer-preview`](https://github.com/hyperledger-archives/fabric/tree/v0.5-developer-preview) branch, you must use the hfc version 0.5.x release and install it with the following command: - `file:///YOUR-FABRIC-DIR/sdk/node/doc/modules/_hfc_.html` + npm install hfc@0.5.x - `file:///YOUR-FABRIC-DIR/sdk/node/doc/classes/_hfc_.chain.html` +For networks running newer versions of the Hyperledger Fabric, please install the version of hfc that corresponds to the same version of the Fabric code base. For example, for the Fabric v0.6 branch install hfc with the following command: -### Going Deeper + npm install hfc@0.6.x -#### Pluggability -HFC was designed to support two pluggable components: +## Usage -1. Pluggable key value store which is used to retrieve and store keys associated with a member. 
The key value store is used to store sensitive private keys, so care must be taken to properly protect access.
+### Terminology
-2. Pluggable member service which is used to register and enroll members. Member services enables hyperledger to be a permissioned blockchain, providing security services such as anonymity, unlinkability of transactions, and confidentiality
+In order to transact on a Hyperledger blockchain, you must first have an identity which is both **registered** and **enrolled**. **Registration** means issuing a user invitation to join a blockchain network. **Enrollment** means accepting a user invitation to join a blockchain network.
-#### HFC objects and reference documentation
-HFC is written primarily in typescript and is object-oriented. The source can be found in the `fabric/sdk/node/src` directory.
+In the current Hyperledger Fabric implementation, the administrative user admin is already registered on the network by virtue of the fact that their user ID and password pair are already added to the membership services server configuration file, [membersrvc.yaml](https://github.com/hyperledger/fabric/blob/master/membersrvc/membersrvc.yaml). Therefore, the admin user must only enroll to be able to transact on the network. After enrollment, the administrative user is then able to register and enroll new users, in the role of a registrar. New users are added to the network programmatically, through the HFC `chain.registerAndEnroll` API.
-To go deeper, you can view the reference documentation in your browser by opening the [reference documentation](doc/modules/_hfc_.html) and clicking on **"hfc"** on the right-hand side under **"Globals"**. This will work after you have built the SDK per the instruction [here](#building-the-client-sdk).
+### Example
-The following is a high-level description of the HFC objects (classes and interfaces) to help guide you through the object hierarchy.
+An example presented in the [SDK documentation](http://hyperledger-fabric.readthedocs.io/en/latest/nodeSDK/sample-standalone-app/) demonstrates a typical application using HFC.
-* The main top-level class is [Chain](doc/classes/_hfc_.chain.html). It is the client's representation of a chain. HFC allows you to interact with multiple chains and to share a single [KeyValStore](doc/interfaces/_hfc_.keyvalstore.html) and [MemberServices](doc/interfaces/_hfc_.memberservices.html) object with multiple Chain objects as needed. For each chain, you add one or more [Peer](doc/classes/_hfc_.peer.html) objects which represents the endpoint(s) to which HFC connects to transact on the chain.
+The application sets up a target for the membership service server and enrolls the `admin` user. Subsequently, the admin user is set as the registrar user and registers and enrolls a new member, `JohnDoe`. The new member, `JohnDoe`, is then able to transact on the blockchain by deploying a chaincode, invoking a chaincode function, and querying chaincode state.
-* The [KeyValStore](doc/interfaces/_hfc_.keyvalstore.html) is a very simple interface which HFC uses to store and retrieve all persistent data. This data includes private keys, so it is very important to keep this storage secure. The default implementation is a simple file-based version found in the [FileKeyValStore](doc/classes/_hfc_.filekeyvalstore.html) class.
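The Terminology and Example sections added above describe the enroll-then-register flow only in prose. The sketch below illustrates that flow with hfc; it is not part of this patch, and the URLs, key-value store path, admin secret, and `bank_a` affiliation are assumed values for a typical local network (the secret must match whatever is configured in membersrvc.yaml).

```typescript
// Illustrative only: enroll the pre-registered admin, then register and
// enroll a brand-new member through the registrar.
var hfc = require('hfc');

var chain = hfc.newChain("testChain");

// Client-side persistence for enrollment certificates and keys.
chain.setKeyValStore(hfc.newFileKeyValStore('/tmp/keyValStore'));

// Membership services (registration/enrollment) and one peer to transact with.
chain.setMemberServicesUrl("grpc://localhost:7054");
chain.addPeer("grpc://localhost:7051");

// The admin secret is a placeholder; it must match the membersrvc.yaml entry.
chain.enroll("admin", "Xurw3yU9zI0l", function (err, admin) {
    if (err) return console.log("admin enrollment failed: %s", err);

    // Let admin act as registrar for subsequent registrations.
    chain.setRegistrar(admin);

    var registrationRequest = {
        enrollmentID: "JohnDoe",
        affiliation: "bank_a"   // assumed affiliation; must exist in membersrvc.yaml
    };
    chain.registerAndEnroll(registrationRequest, function (err, user) {
        if (err) return console.log("registerAndEnroll failed: %s", err);
        // 'user' can now deploy, invoke, and query chaincode.
        console.log("%s is enrolled and ready to transact", user.getName());
    });
});
```

From the returned member, chaincode can then be deployed, invoked, and queried as outlined in the Example section.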
+### Chaincode Deployment -* The [MemberServices](doc/interfaces/_hfc_.memberservices.html) interface is implemented by the [MemberServicesImpl](doc/classes/_hfc_.memberservicesimpl.html) class and provides security and identity related features such as privacy, unlinkability, and confidentiality. This implementation issues *ECerts* (enrollment certificates) and *TCerts* (transaction certificates). ECerts are for enrollment identity and TCerts are for transactions. +Chaincode may be deployed in **development** mode or **network** mode. **Development** mode refers to running the chaincode as a stand alone process outside the network peer. This approach is useful while doing chaincode development and testing. The approach is described [here](http://hyperledger-fabric.readthedocs.io/en/latest/Setup/Chaincode-setup/). **Network** mode refers to packaging the chaincode and its dependencies and subsequently deploying the package to an existing network as a Docker container. To have the chaincode deployment with hfc succeed in network mode, you must properly set up the chaincode project and its dependencies. The instructions for doing so can be found in the [Chaincode Deployment](http://hyperledger-fabric.readthedocs.io/en/latest/nodeSDK/node-sdk-indepth/#chaincode-deployment) section of the Node.js SDK documentation page. -* The [Member](doc/classes/_hfc_.member.html) class most often represents an end user who transacts on the chain, but it may also represent other types of members such as peers. From the Member class, you can *register* and *enroll* members or users. This interacts with the [MemberServices](doc/interfaces/_hfc_.memberservices.html) object. You can also deploy, query, and invoke chaincode directly, which interacts with the [Peer](doc/classes/_hfc_.peer.html). The implementation for deploy, query and invoke simply creates a temporary [TransactionContext](doc/classes/_hfc_.transactioncontext.html) object and delegates the work to it. +### Enabling TLS -* The [TransactionContext](doc/classes/_hfc_.transactioncontext.html) class implements the bulk of the deploy, invoke, and query logic. It interacts with MemberServices to get a TCert to perform these operations. Note that there is a one-to-one relationship between TCert and TransactionContext; in other words, a single TransactionContext will always use the same TCert. If you want to issue multiple transactions with the same TCert, then you can get a [TransactionContext](doc/classes/_hfc_.transactioncontext.html) object from a [Member](doc/classes/_hfc_.member.html) object directly and issue multiple deploy, invoke, or query operations on it. Note however that if you do this, these transactions are linkable, which means someone could tell that they came from the same user, though not know which user. For this reason, you will typically just call deploy, invoke, and query on the User or Member object. +If you wish to enable TLS connection between the member services, the peers, and the chaincode container, please see the [Enabling TLS](http://hyperledger-fabric.readthedocs.io/en/latest/nodeSDK/node-sdk-indepth/#enabling-tls) section within the Node.js SDK documentation. -## Future Work -The following is a list of known remaining work to be done. +## Objects, Classes, And Interfaces -* The reference documentation needs to be made simpler to follow as there are currently some internal classes and interfaces that are being exposed. +HFC is written primarily in Typescript and is object-oriented. 
The source can be found in the [fabric/sdk/node](https://github.com/hyperledger/fabric/tree/master/sdk/node) directory of the Hyperledger Fabric project. To better understand the object hierarchy, please read the high-level descriptions of the HFC objects (classes and interfaces) in the [HFC Objects](http://hyperledger-fabric.readthedocs.io/en/latest/nodeSDK/node-sdk-indepth/#hfc-objects) section within the Node.js SDK documentation. You can also build the complete reference documentation locally by following the instructions [here](http://hyperledger-fabric.readthedocs.io/en/latest/nodeSDK/app-developer-env-setup/). -* We are investigating how to make deployment of a chaincode simpler by not requiring you to set up a specific directory structure with dependencies. +## Unit Tests -* Implement events appropriately, both custom and non-custom. The 'complete' event for `deploy` and `invoke` is currently implemented by simply waiting a set number of seconds (5 for invoke, 20 for deploy). It needs to receive a complete event from the server with the result of the transaction and make this available to the caller. This has not yet been implemented. +HFC includes a set of unit tests implemented with the [tape framework](https://github.com/substack/tape). Currently, the unit tests are designed to run inside the Hyperledger Fabric Vagrant development environment. To run the unit tests, first set up the Hyperledger Fabric Vagrant development environment per the instructions [here](http://hyperledger-fabric.readthedocs.io/en/latest/dev-setup/devenv/). -* Support SHA2. HFC currently supports SHA3. +Launch the Vagrant VM and log into the VM by executing the following command: + + vagrant ssh + +Run the unit tests within the Vagrant environment with the following commands: + + cd $GOPATH/src/github.com/hyperledger/fabric + make node-sdk-unit-tests + +The following are brief descriptions of each of the unit tests that are being run. + +#### registrar + +The [registrar.js](https://github.com/hyperledger/fabric/blob/master/sdk/node/test/unit/registrar.js) test case exercises registering users with the membership services server. It also tests registering a designated registrar user which can then register additional users. + +#### chain-tests + +The [chain-tests.js](https://github.com/hyperledger/fabric/blob/master/sdk/node/test/unit/chain-tests.js) test case exercises the [chaincode_example02.go](https://github.com/hyperledger/fabric/tree/master/examples/chaincode/go/chaincode_example02) chaincode when it has been deployed in both development mode and network mode. + +#### asset-mgmt + +The [asset-mgmt.js](https://github.com/hyperledger/fabric/blob/master/sdk/node/test/unit/asset-mgmt.js) test case exercises the [asset_management.go](https://github.com/hyperledger/fabric/tree/master/examples/chaincode/go/asset_management) chaincode when it has been deployed in both development mode and network mode. + +#### asset-mgmt-with-roles + +The [asset-mgmt-with-roles.js](https://github.com/hyperledger/fabric/blob/master/sdk/node/test/unit/asset-mgmt-with-roles.js) test case exercises the [asset_management_with_roles.go](https://github.com/hyperledger/fabric/tree/master/examples/chaincode/go/asset_management_with_roles) chaincode when it has been deployed in both development mode and network mode. + +## Basic Troubleshooting + +Keep in mind that you can perform the enrollment process with the membership services server only once, as the enrollmentSecret is a one-time-use password. 
If you have performed a user registration/enrollment with the membership services and subsequently deleted the crypto tokens stored on the client side, the next time you try to enroll, errors similar to the ones below will be seen. + + ``` + Error: identity or token do not match + ``` + ``` + Error: user is already registered + ``` + +To address this, remove any stored crypto material from the CA server by following the instructions [here](https://github.com/hyperledger/fabric/blob/master/docs/Setup/Chaincode-setup.md#removing-temporary-files-when-security-is-enabled). You will also need to remove any of the crypto tokens stored on the client side by deleting the KeyValStore directory. That directory is configurable and is set to `/tmp/keyValStore` within the unit tests. + +## Support + +We use [Hyperledger Slack](https://hyperledgerproject.slack.com/) for communication regarding the SDK and any potential issues. Please join SDK related channels on Slack such as #fabric-sdk and #nodesdk. If you would like to file a bug report or submit a request for a feature please do so on Jira [here](https://jira.hyperledger.org). + +## Contributing + +We welcome any contributions to the Hyperledger Fabric Node.js SDK project. If you would like to contribute please read the contribution guidelines [here](http://hyperledger-fabric.readthedocs.io/en/latest/CONTRIBUTING/) for more information. + +## License + +Contributed to the Hyperledger Project under the [Apache Software License 2.0](https://github.com/hyperledger/fabric/blob/master/LICENSE). diff --git a/sdk/node/bin/run-unit-tests.sh b/sdk/node/bin/run-unit-tests.sh index cc0baaa02aa..5fba3c0cce2 100755 --- a/sdk/node/bin/run-unit-tests.sh +++ b/sdk/node/bin/run-unit-tests.sh @@ -1,83 +1,314 @@ +#!/bin/bash # +# run-unit-tests.sh # Run the unit tests associated with the node.js client sdk +# Any arguments supplied to this script will be interpreted +# as a *js test file existing in # +# ${GOPATH}//src/github.com/hyperledger/fabric/sdk/node/test/unit +# +# By default all tests are run. Note that run-unit-tests.sh will +# only run tests that it knows about, since there are unique +# environmental prerequisites to many of the tests (including copying +# the fabric code into the chaincode directory). The 'case' statement +# in runTests() will need to be updated with the specific actions +# to perform for any new test that is added to the test/unit directory. +# +# The tests can be run against a local fabric network, or remote (such +# as Bluemix, HSBN, vLaunch, softLayer, etc.). +# +# The test will run 4 permutations of each test: +# TLS-disabled - deployMode=Net +# TLS-disabled - deployMode=Dev +# TLS-enabled - deployMode=Net +# TLS-enabled - deployMode=Dev +# While both 'dev' and 'net' mode are exercised, only +# 'net' mode will be executed when the network nodes +# are remote w/r/t to the host running the tests. 
+# +# There are six conditions that are fatal and will +# cause the tests to abort: +# Local: +# membersrvc build fails +# membersrvc fails to initialize +# peer build fails +# peer fails to initialize +# Remote: +# membersrvc is unreachable +# peer is unreachable +# +# The following environment variables will determine the +# network resources to be tested +# SDK_KEYSTORE - local directory under which user +# authentication data is stored +# SDK_CA_CERT_FILE - CA certificate used to authenticate +# network nodes +# SDK_CA_CERT_HOST - Expected host identity in server certificate +# Default is 'tlsca' +# SDK_MEMBERSRVC_ADDRESS - ip address or hostname of membersrvc +# SDK_PEER_ADDRESS - ip address or hostname of peer node under test; +# it is assumed that only one peer will contacted +# SDK_TLS - Set to '1' (use TLS) or '0' (do not use TLS) +# Note that if TLS is requested, a CA cert must be used. +# The default certificate generated by membersrvc is the default: +# /var/hyperledger/production/.membersrvc/tlsca.cert +# The run-unit-tests.sh script will run all unit tests +# twice, one without TLS and one with TLS enabled +# +# Other environment variables that will be referenced by individual tests +# SDK_DEPLOYWAIT - time (in seconds) to wait after sending deploy request +# SDK_INVOKEWAIT - time (in seconds) to wait after sending invoke request +# GRPC_SSL_CIPHER_SUITES - quoted, colon-delimited list of specific cipher suites +# that node.js client sdk should propose. +# The default list is set in sdk/node/lib/hfc.js +# SDK_DEFAULT_USER - User defined with 'registrar' authority. Default is 'WebAppAdmin' +# SDK_DEFAULT_SECRET - Password for SDK_DEFAULT_USER. Defaults to 'DJY27pEnl16d' +# When running a local network, these are configured in the +# membersrvc.yaml file. In the IBM Bluemix starter and HSBN +# networks, this password is generated dynamically and returned +# in the network credentials file. +# SDK_KEYSTORE_PERSIST - Set to '0' will delete all previously generated auth +# enrollment data prior to running the tests +# Set to '1' keep the auth data from previous enrollments +# SDK_CHAINCODE_PATH - the directory (relative to ${GOPATH}/src/) which contains +# the chaincode (and CA cert) to be deployed +# SDK_CHAINCODE_ID - the chaincode ID from a previous deployment. e.g. can be used +# to invoke/query previously-deployed chaincode +# +export NODE_PATH=${GOPATH}/src/github.com/hyperledger/fabric/sdk/node:${GOPATH}/src/github.com/hyperledger/fabric/sdk/node/lib:/usr/local/lib/node_modules:/usr/lib/nodejs:/usr/lib/node_modules:/usr/share/javascript +#export NODE_PATH=${GOPATH}/src/github.com/hyperledger/fabric/sdk/node:/usr/local/lib/node_modules:/usr/lib/nodejs:/usr/lib/node_modules:/usr/share/javascript -# Variable to store error results -NODE_ERR_CODE=0 +errorExit() { + printf "%s...exiting\n" "$1" + exit 1 +} -main() { - # Initialization - init +resolvHost() { + # simple 'host' or 'nslookup' doesn't work for + # /etc/host entries...attempt to resolve via ping + local host="$1" + ping -w2 -c1 "$host" | head -n1 | awk -F'[()]' '{print $2}' +} + +isLocal() { + # determine if the ca/peer instance in question + # is running native/vagrant on this local machine, + # or on a remote network. 
echo 'true' or 'false' + # This permits constructions like + # $(isLocal ) + # to return 0 (true) or 1 (false) + + local addr="$1" + local port="$2" - # Start member services - startMemberServices + # assume remote + local result="false" - # Run tests in network mode - export DEPLOY_MODE='net' - runTests + # if localhost, return true + if test ${addr%%.*} = "127"; then + result="true" + else + # search this machine for address + ip addr list | + awk -v s="$addr" -v rc=1 ' + $1=="inet" { gsub(/\/.*/,"",$2); if ($2==s) rc=0 } + END { exit rc }' + if test $? -eq 0; then + # address is local but still peer might be running in container + # if docker-proxy is not running this instance, return true + sudo netstat -tlpn | grep "$port" | + awk -F '/' '{print $NF}'| grep -q proxy + test $? = 0 || result="true" + fi + fi + echo "$result" + return 0 +} - # Run tests in dev mode - export DEPLOY_MODE='dev' - runTests +isReachable() { + # a test to see if there is a listener on + # specified host:port + # netcat would be *far* simpler: + # nc -nzvt host port + # but not guaranteed to be installed + # so use python, since it is ubiquitious + local host="$1" + local port="$2" + test -z "$host" -o -z "$port" && return 1 - # Stop peer and member services - stopPeer - stopMemberServices + python - < /dev/null - cp -r fabric/vendor/github.com/op .. - cd ../../.. - go build + elif test ! -d $SRC_DIR; then + echo "ERROR: directory does not exist: $SRCDIR" + rc=$((rc+1)) + else + mkdir $DSTDIR + cd $DSTDIR + cp $SRCDIR/${chaincodeSrc}.go . + test "$SDK_TLS" = "1" && cp "$SDK_CA_CERT_FILE" . + mkdir -p vendor/github.com/hyperledger + cd vendor/github.com/hyperledger + echo "copying github.com/hyperledger/fabric; please wait ..." + # git clone https://github.com/hyperledger/fabric > /dev/null + cp -r $FABRIC . + cp -r fabric/vendor/github.com/op .. + cd ../../.. + + echo "Building chaincode..." + go build + rc+=$? + fi + + return $rc } # $1 is the name of the sample to start startExampleInDevMode() { - SRCDIR=$EXAMPLES/$1 - if [ ! -d $SRCDIR ]; then - echo "FATAL ERROR: directory does not exist: $SRCDIR" - NODE_ERR_CODE=1 - exit 1; - fi - EXE=$SRCDIR/$1 - if [ ! -f $EXE ]; then - cd $SRCDIR - go build - fi + local chaincodeSrc="$1" + local rc=0 export CORE_CHAINCODE_ID_NAME=$2 - export CORE_PEER_ADDRESS=0.0.0.0:7051 - startProcess "$EXE" "${EXE}.log" "$1" + export CORE_PEER_ADDRESS=localhost:7051 + + SRCDIR=${EXAMPLES}/${chaincodeSrc} + + if test ! -d $SRC_DIR; then + echo "ERROR: directory does not exist: $SRCDIR" + rc=1 + else + EXE=${SRCDIR}/${chaincodeSrc} + if test ! -f $EXE; then + cd $SRCDIR + go build + rc=$? + fi + + test "$rc" = 0 && startProcess "$EXE" "${EXE}.log" "$chaincodeSrc" + let rc+=$? + fi + + return $rc } -# $1 is the name of the sample to start +# $1 is the name of the sample to stop stopExampleInDevMode() { + echo "killing $1" killProcess $1 } runRegistrarTests() { + local rc=0 echo "BEGIN running registrar tests ..." node $UNITTEST/registrar.js - if [ $? -ne 0 ]; then - echo "ERROR running registrar tests!" - NODE_ERR_CODE=1 - fi + rc=$? echo "END running registrar tests" + return $rc +} + +runMemberApi() { + local rc=0 + echo "BEGIN running member-api tests ..." + node $UNITTEST/member-api.js + rc=$? + echo "END running member-api tests" + return $rc } runChainTests() { + local rc=0 echo "BEGIN running chain-tests ..." preExample chaincode_example02 mycc1 - node $UNITTEST/chain-tests.js - if [ $? -ne 0 ]; then - echo "ERROR running chain-tests!" - NODE_ERR_CODE=1 + if test $? 
-eq 0; then + node $UNITTEST/chain-tests.js + rc=$? + postExample chaincode_example02 + else + echo "setup failed" + let rc+=1 fi - postExample chaincode_example02 echo "END running chain-tests" + return $rc } runAssetMgmtTests() { + local rc=0 echo "BEGIN running asset-mgmt tests ..." preExample asset_management mycc2 - node $UNITTEST/asset-mgmt.js - if [ $? -ne 0 ]; then - echo "ERROR running asset-mgmt tests!" - NODE_ERR_CODE=1 + if test $? -eq 0; then + node $UNITTEST/asset-mgmt.js + rc=$? + postExample asset_management + else + echo "setup failed" + let rc+=1 fi - postExample asset_management echo "END running asset-mgmt tests" + return $rc } runAssetMgmtWithRolesTests() { + local rc=0 echo "BEGIN running asset management with roles tests ..." preExample asset_management_with_roles mycc3 - node $UNITTEST/asset-mgmt-with-roles.js - if [ $? -ne 0 ]; then - echo "ERROR running asset management with roles tests!" - NODE_ERR_CODE=1 + if test $? -eq 0; then + node $UNITTEST/asset-mgmt-with-roles.js + rc=$? + postExample asset_management_with_roles + else + echo "setup failed" + let rc+=1 fi - postExample asset_management_with_roles echo "END running asset management with roles tests" + return $rc } +runAssetMgmtWithDynamicRolesTests() { + local rc=0 + echo "BEGIN running asset management with dynamic roles tests ..." + preExample asset_management_with_roles mycc4 + if test $? -eq 0; then + node $UNITTEST/asset-mgmt-with-dynamic-roles.js + rc=$? + echo "RC:" $rc + postExample asset_management_with_roles + else + echo "setup failed" + let rc+=1 + fi + echo "END running asset management with dynamic roles tests" + return $rc +} # start process # $1 is executable path with any args # $2 is the log file # $3 is string description of the process startProcess() { - $1 > $2 2>&1& + local cmd="$1" + local log="$2" + local proc="$3" + + $cmd >> $log 2>&1& PID=$! sleep 2 kill -0 $PID > /dev/null 2>&1 - if [ $? -eq 0 ]; then - echo "$3 is started" + if test $? -eq 0; then + echo "$proc is started" else - echo "ERROR: $3 failed to start; see $2" - NODE_ERR_CODE=1 - exit 1 + echo "ERROR: $proc failed to start; see $log" + return 1 fi } # kill a process # $1 is the executable name killProcess() { - PID=`ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}'` - if [ "$PID" != "" ]; then - echo "killing PID $PID running $1 ..." + local proc="$1" + PID=$(ps -ef | awk -v s="$proc" '$0~s && $8!="awk" {print $2}') + if test -n "$PID"; then + echo "killing PID $PID running $proc ..." kill -9 $PID fi } -main +# Run tests +runTests() { + local TestsToBeRun="$1" + local rc=0 + + echo "Begin running tests in $SDK_DEPLOY_MODE mode ..." + # restart peer + if $($peerAddrIsLocal); then + stopPeer + startPeer || errorExit "Start peer failed." + fi + + for Test in $TestsToBeRun; do + # echo "HIT TO RUN NEXT TEST...." + # read x + case "${Test##*/}" in + "registrar.js") runRegistrarTests ;; + "chain-tests.js") runChainTests ;; + "asset-mgmt.js") runAssetMgmtTests ;; + "member-api.js") runMemberApi ;; +"asset-mgmt-with-dynamic-roles.js") if test "$SDK_TLS" = 0; then + runAssetMgmtWithDynamicRolesTests + else + echo "FAB-392; SKIPPING AssetMgmtWithDynamicRolesTests" + fi ;; + # bug...FAB-392 - ACA combined with TLS fails - re-enable this for all tests when closed + "asset-mgmt-with-roles.js") if test "$SDK_TLS" = 0; then + runAssetMgmtWithRolesTests + else + echo "FAB-392; SKIPPING AssetMgmtWithRolesTests" + fi ;; + *) echo "NO case statement for ${Test##*/}, skipping..." ;; + esac + if test $? 
-ne 0; then + echo "******* ${Test##*/} failed! *******" + let NODE_ERR_CODE=$((NODE_ERR_CODE+1)) + fi + echo "**************************************************************" + echo "" + echo "" + done + + echo "End running tests in $SDK_DEPLOY_MODE mode" + sleep 5 +} -if [ "$NODE_ERR_CODE" != "0" ]; then - echo "ERROR: Error executing run-unit-tests.sh. Exiting with status code '1'." - exit 1 -fi +main() { + # Initialization + echo "Initilizing environment..." + init + { + printf "%s -----> Beginning nodejs SDK UT tests...\n" "$(date)" + for t in ${TEST_SUITE[*]}; do + echo ${t##*/} | sed 's/^/ /' + done + + # Start member services + for tlsEnabled in 0 1; do + export SDK_TLS=$tlsEnabled + test "$SDK_TLS" = 0 && echo "Running NON-TLS-enabled tests..." || echo "Running TLS-enabled tests..." + + if test "$SDK_TLS" = "1"; then + : ${SDK_CA_CERT_FILE:="/var/hyperledger/production/.membersrvc/tlsca.cert"} + : ${SDK_CA_KEY_FILE:="/var/hyperledger/production/.membersrvc/tlsca.priv"} + export SDK_CA_CERT_FILE + else + export MEMBERSRVC_CA_ACA_ENABLED=true + fi + + $($caAddrIsLocal) && startMemberServices + test $? -eq 0 || errorExit "Failed to start membersrvc" + + # Run tests in network mode + SDK_DEPLOYWAIT=40 + SDK_INVOKEWAIT=15 + export SDK_DEPLOY_MODE='net' + runTests "$TEST_SUITE" + + # Run tests in dev mode + SDK_DEPLOYWAIT=10 + SDK_INVOKEWAIT=5 + if $($peerAddrIsLocal); then + export SDK_DEPLOY_MODE='dev' + runTests "$TEST_SUITE" + fi + + # Stop peer and member services + $($peerAddrIsLocal) && stopPeer + $($caAddrIsLocal) && stopMemberServices + + # do not delete the authentication data if this is set + test "$SDK_KEYSTORE_PERSIST" != "1" && + rm -rf /var/hyperledger/production /tmp/*keyValStore* + + done + printf "\n%s\n" "${NODE_ERR_CODE}" + } 2>&1 | tee $LOGDIR/log +} + +TEST_SUITE="$@" +main +NODE_ERR_CODE=$(sed -n '$p' $LOGDIR/log | awk '{print $NF}') +echo "exit code: $NODE_ERR_CODE" +printf "%s " $(date) +test "$NODE_ERR_CODE" -eq 0 && echo "UT tests PASSED" || echo "UT tests FAILED" +exit $NODE_ERR_CODE diff --git a/sdk/node/makedoc.sh b/sdk/node/makedoc.sh deleted file mode 100755 index 89d6cad71a1..00000000000 --- a/sdk/node/makedoc.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -# ------------------------------------------------------------- -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# ------------------------------------------------------------- - -typedoc=$(which typedoc) -if [[ $? -ne 0 ]]; then - echo "Installing typedoc ..." - sudo npm install -g typedoc - if [[ $? -ne 0 ]]; then - echo "No typedoc found. Please install it like this:" - echo " npm install -g typedoc" - echo "and rerun this shell script again." 
- exit 1 - fi - echo "Successfully installed typedoc" -fi -set -e - -tv="$(typings -v)" -if [[ "${tv%.*}" < "1.0" ]]; then - echo "You have typings ${tv} but you need 1.0 or higher." - exit 1 -fi - -mkdir -p doc -rm -rf doc/* - -typedoc -m amd \ ---name 'Node.js Hyperledger Fabric SDK' \ ---includeDeclarations \ ---excludeExternals \ ---excludeNotExported \ ---out doc \ -src/hfc.ts typedoc-special.d.ts - -# Typedoc generates links to working GIT repo which is fixed -# below to use an official release URI. -DOCURI="https://github.com/hyperledger/fabric/tree/master/sdk/node/" -find doc -name '*.html' -exec sed -i 's!href="http.*sdk/node/!href="'$DOCURI'!' {} \; diff --git a/sdk/node/package.json b/sdk/node/package.json index 4eaa8f696a2..b5a51318bef 100644 --- a/sdk/node/package.json +++ b/sdk/node/package.json @@ -1,9 +1,19 @@ { "name": "hfc", - "version": "0.0.2", + "version": "0.6.2", + "description": "The Hyperledger Fabric Client SDK (HFC) provides a powerful and easy to use API to interact with a Hyperledger Fabric blockchain.", + "keywords": [ + "hfc", + "hyperledger", + "blockchain" + ], "bin": "./bin/main.js", "typings": "hfc.d.ts", "main": "index.js", + "repository": { + "type": "github", + "url": "https://github.com/hyperledger/fabric" + }, "dependencies": { "aes-js": "^1.0.0", "asn1": "https://github.com/mcavage/node-asn1", @@ -12,9 +22,11 @@ "crypto": "0.0.3", "debug": "^2.2.0", "elliptic": "^6.2.3", + "es6-set": "^0.1.4", "events": "^1.1.0", "fs": "0.0.2", - "grpc": "^0.13.2-pre1", + "grpc": "1.0.0", + "hashtable": "^2.0.2", "js-sha3": "^0.5.1", "json-stringify-safe": "^5.0.1", "jsrsasign": "^5.0.10", @@ -22,15 +34,18 @@ "node-uuid": "^1.4.7", "node.extend": "^1.1.5", "pkijs": "^1.3.19", - "sjcl": "^1.0.3", - "sjcl-codec": "^0.1.1", + "sjcl": "1.0.3", + "sjcl-codec": "0.1.1", "sleep": "^3.0.1", "tar-fs": "^1.13.0", + "typescript": "1.8.10", "url": "^0.11.0", "util": "^0.10.3", "uuidv4": "^0.3.1" }, "devDependencies": { - "tape": "^4.5.1" - } + "tape": "^4.5.1", + "x509": "^0.2.6" + }, + "license": "Apache License v2.0" } diff --git a/sdk/node/src/crypto.ts b/sdk/node/src/crypto.ts index 5b577d0d281..a905687d9a2 100644 --- a/sdk/node/src/crypto.ts +++ b/sdk/node/src/crypto.ts @@ -115,13 +115,13 @@ export class Crypto { * @params hashAlgorithm The hash algorithm ('SHA2' or 'SHA3') */ setHashAlgorithm(hashAlgorithm:string):void { - this.checkHashFunction(hashAlgorithm); + Crypto.checkHashFunction(hashAlgorithm); this.hashAlgorithm = hashAlgorithm; this.initialize(); } - generateNonce() { + static generateNonce() { return crypto.randomBytes(NonceSize); } @@ -305,11 +305,11 @@ export class Crypto { return decryptedBytes; } - aesKeyGen() { + static aesKeyGen() { return crypto.randomBytes(AESKeyLength); } - aesCFBDecryt(key, encryptedBytes) { + static aesCFBDecrypt(key, encryptedBytes) { var iv = crypto.randomBytes(IVLength); var aes = new aesjs.ModeOfOperation.cfb(key, iv, IVLength); @@ -336,13 +336,13 @@ export class Crypto { var decryptedBytes, unpaddedBytes; - decryptedBytes = this.CBCDecrypt(key, bytes); - unpaddedBytes = this.PKCS7UnPadding(decryptedBytes); + decryptedBytes = Crypto.CBCDecrypt(key, bytes); + unpaddedBytes = Crypto.PKCS7UnPadding(decryptedBytes); return unpaddedBytes; }; - aes256GCMDecrypt(key:Buffer, ct:Buffer) { + static aes256GCMDecrypt(key:Buffer, ct:Buffer) { let decipher = crypto.createDecipheriv('aes-256-gcm', key, ct.slice(0, GCMStandardNonceSize)); decipher.setAuthTag(ct.slice(ct.length - GCMTagSize)); let dec = decipher.update( @@ -361,7 +361,7 @@ 
export class Crypto { if (!info) info = ""; - var key = this.hkdf2(bytesToBits(new Buffer(ikm)), keyBitLength, bytesToBits(salt), info, this.hashFunctionKeyDerivation); + var key = Crypto.hkdf2(bytesToBits(new Buffer(ikm)), keyBitLength, bytesToBits(salt), info, this.hashFunctionKeyDerivation); return bitsToBytes(key); @@ -394,7 +394,7 @@ export class Crypto { throw new Error("Illegal level: " + this.securityLevel + " - must be either 256 or 384"); } - private checkHashFunction(hashAlgorithm: string) { + private static checkHashFunction(hashAlgorithm: string) { if (!_isString(hashAlgorithm)) throw new Error("Illegal Hash function family: " + hashAlgorithm + " - must be either SHA2 or SHA3"); @@ -405,7 +405,7 @@ export class Crypto { private initialize() { this.checkSecurityLevel(this.securityLevel); - this.checkHashFunction(this.hashAlgorithm); + Crypto.checkHashFunction(this.hashAlgorithm); this.suite = this.hashAlgorithm.toLowerCase() + '-' + this.securityLevel; if (this.securityLevel == CURVE_P_256_Size) { @@ -454,7 +454,7 @@ export class Crypto { * @param {Object} [Hash=sjcl.hash.sha256] The hash function to use. * @return {bitArray} derived key. */ - private hkdf2(ikm, keyBitLength, salt, info, Hash) { + private static hkdf2(ikm, keyBitLength, salt, info, Hash) { var hmac, key, i, hashLen, loops, curOut, ret = []; // Hash = Hash || sjcl.hash.sha256; @@ -496,7 +496,7 @@ export class Crypto { return sjcl.bitArray.clamp(ret, keyBitLength); } - private CBCDecrypt(key, bytes) { + private static CBCDecrypt(key, bytes) { debug('key length: ', key.length); debug('bytes length: ', bytes.length); var iv = bytes.slice(0, BlockSize); @@ -524,7 +524,6 @@ export class Crypto { start += BlockSize; end += BlockSize; } - ; decryptedBytes = Buffer.concat(decryptedBlocks); } @@ -539,7 +538,7 @@ export class Crypto { }; - private PKCS7UnPadding(bytes) { + private static PKCS7UnPadding(bytes) { //last byte is the number of padded bytes var padding = bytes.readUInt8(bytes.length - 1); diff --git a/sdk/node/src/hfc.ts b/sdk/node/src/hfc.ts index 867078a7274..9e493133184 100644 --- a/sdk/node/src/hfc.ts +++ b/sdk/node/src/hfc.ts @@ -43,8 +43,16 @@ * the server side transaction processing path. */ -// Instruct boringssl to use ECC for tls. -process.env['GRPC_SSL_CIPHER_SUITES'] = 'HIGH+ECDSA'; +process.env.GRPC_SSL_CIPHER_SUITES = process.env.GRPC_SSL_CIPHER_SUITES + ? 
process.env.GRPC_SSL_CIPHER_SUITES + : 'ECDHE-RSA-AES128-GCM-SHA256:' + + 'ECDHE-RSA-AES128-SHA256:' + + 'ECDHE-RSA-AES256-SHA384:' + + 'ECDHE-RSA-AES256-GCM-SHA384:' + + 'ECDHE-ECDSA-AES128-GCM-SHA256:' + + 'ECDHE-ECDSA-AES128-SHA256:' + + 'ECDHE-ECDSA-AES256-SHA384:' + + 'ECDHE-ECDSA-AES256-GCM-SHA384' ; var debugModule = require('debug'); var fs = require('fs'); @@ -55,6 +63,8 @@ var jsrsa = require('jsrsasign'); var elliptic = require('elliptic'); var sha3 = require('js-sha3'); var BN = require('bn.js'); +var Set = require('es6-set'); +var HashTable = require('hashtable'); import * as crypto from "./crypto" import * as stats from "./stats" import * as sdk_util from "./sdk_util" @@ -159,6 +169,8 @@ export interface RegistrationRequest { roles?:string[]; // Affiliation for a user affiliation:string; + // The attribute names and values to grant to this member + attributes?:Attribute[]; // 'registrar' enables this identity to register other members with types // and can delegate the 'delegationRoles' roles registrar?:{ @@ -169,6 +181,15 @@ export interface RegistrationRequest { }; } +// An attribute consisting of a name and value +export interface Attribute { + // The attribute name + name:string; + // The attribute value + value:string; +} + +// An enrollment request export interface EnrollmentRequest { // The enrollment ID enrollmentID:string; @@ -198,6 +219,12 @@ export interface Enrollment { chainKey:string; } +// GRPCOptions +export interface GRPCOptions { + pem: string; + hostnameOverride: string; +} + // A request to get a batch of TCerts export class GetTCertBatchRequest { constructor( public name: string, @@ -290,7 +317,7 @@ export class Certificate { /** * Enrollment certificate. */ -export class ECert extends Certificate { +class ECert extends Certificate { constructor(public cert:Buffer, public privateKey:any) { @@ -335,6 +362,8 @@ export interface DeployRequest extends TransactionRequest { chaincodePath:string; // The name identifier for the chaincode to deploy in development mode. chaincodeName:string; + // The directory on the server side, where the certificate.pem will be copied + certificatePath:string; } /** @@ -369,6 +398,7 @@ export interface TransactionProtobuf { setConfidentialityProtocolVersion(version:string):void; setNonce(nonce:Buffer):void; setToValidators(Buffer):void; + getTxid():string; getChaincodeID():{buffer: Buffer}; setChaincodeID(buffer:Buffer):void; getMetadata():{buffer: Buffer}; @@ -419,6 +449,9 @@ export class Chain { // The member services used for this chain private memberServices:MemberServices; + // The eventHub service used for this chain + private eventHub:EventHub; + // The key-val store used for this chain private keyValStore:KeyValStore; @@ -430,7 +463,7 @@ export class Chain { // Temporary variables to control how long to wait for deploy and invoke to complete before // emitting events. This will be removed when the SDK is able to receive events from the - private deployWaitTime:number = 20; + private deployWaitTime:number = 30; private invokeWaitTime:number = 5; // The crypto primitives object @@ -438,6 +471,7 @@ export class Chain { constructor(name:string) { this.name = name; + this.eventHub = new EventHub(); } /** @@ -450,11 +484,24 @@ export class Chain { /** * Add a peer given an endpoint specification. - * @param endpoint The endpoint of the form: { url: "grpcs://host:port", tls: { .... } } + * @param url The URL of the peer. + * @param opts Optional GRPC options. * @returns {Peer} Returns a new peer. 
*/ - addPeer(url:string, pem?:string):Peer { - let peer = new Peer(url, this, pem); + addPeer(url:string, opts?:GRPCOptions):Peer { + + //check to see if the peer is already part of the chain + this.peers.forEach(function(peer){ + if (peer.getUrl()===url) + { + var error = new Error(); + error.name = "DuplicatePeer"; + error.message = "Peer with URL " + url + " is already a member of the chain"; + throw error; + } + }) + + let peer = new Peer(url, this, opts); this.peers.push(peer); return peer; }; @@ -485,9 +532,10 @@ export class Chain { /** * Set the member services URL * @param {string} url Member services URL of the form: "grpc://host:port" or "grpcs://host:port" + * @param {GRPCOptions} opts optional GRPC options */ - setMemberServicesUrl(url:string, pem?:string):void { - this.setMemberServices(newMemberServices(url,pem)); + setMemberServicesUrl(url:string, opts?:GRPCOptions):void { + this.setMemberServices(newMemberServices(url,opts)); } /** @@ -508,6 +556,29 @@ export class Chain { } }; + /** + * Get the eventHub service associated this chain. + * @returns {eventHub} Return the current eventHub service, or undefined if not set. + */ + getEventHub():EventHub{ + return this.eventHub; + }; + + /** + * Set and connect to the peer to be used as the event source. + */ + eventHubConnect(peerUrl: string, opts?:GRPCOptions):void { + this.eventHub.setPeerAddr(peerUrl, opts); + this.eventHub.connect(); + }; + + /** + * Set and connect to the peer to be used as the event source. + */ + eventHubDisconnect():void { + this.eventHub.disconnect(); + }; + /** * Determine if security is enabled. */ @@ -529,6 +600,20 @@ export class Chain { this.preFetchMode = preFetchMode; } + /** + * Enable or disable ECDSA mode for GRPC. + */ + setECDSAModeForGRPC(enabled:boolean):void { + // TODO: Handle multiple chains in different modes appropriately; this will not currently work + // since it is based env variables. + if (enabled) { + // Instruct boringssl to use ECC for tls. + process.env.GRPC_SSL_CIPHER_SUITES = 'HIGH+ECDSA'; + } else { + delete process.env.GRPC_SSL_CIPHER_SUITES; + } + } + /** * Determine if dev mode is enabled. */ @@ -552,6 +637,11 @@ export class Chain { /** * Set the deploy wait time in seconds. + * Node.js will automatically enforce a + * minimum and maximum wait time. If the + * number of seconds is larger than 2147483, + * less than 1, or not a number, + * the actual wait time used will be 1 ms. * @param secs */ setDeployWaitTime(secs:number):void { @@ -603,7 +693,8 @@ export class Chain { } /** - * Get the user member named 'name'. + * Get the user member named 'name' or create + * a new member if the member does not exist. * @param cb Callback of form "function(err,Member)" */ getMember(name:string, cb:GetMemberCallback):void { @@ -627,7 +718,11 @@ export class Chain { } // Try to get the member from cache. - // If not found, create a new one, restore the state if found, and then store in cache. + // If not found, create a new one. + // If member is found in the key value store, + // restore the state to the new member, store in cache and return the member. + // If there are no errors and member is not found in the key value store, + // return the new member. 
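// (Here the cache is the in-memory members map on this Chain instance, and the
// persistent store is the chain's configured KeyValStore.)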
private getMemberHelper(name:string, cb:GetMemberCallback) { let self = this; // Try to get the member state from the cache @@ -637,6 +732,7 @@ export class Chain { member = new Member(name, self); member.restoreState(function (err) { if (err) return cb(err); + self.members[name]=member; cb(null, member); }); } @@ -737,7 +833,6 @@ export class Member { private chain:Chain; private name:string; private roles:string[]; - private account:string; private affiliation:string; private enrollmentSecret:string; private enrollment:any; @@ -759,7 +854,6 @@ export class Member { let req = cfg; this.name = req.enrollmentID || req.name; this.roles = req.roles || ['fabric.user']; - this.account = req.account; this.affiliation = req.affiliation; } this.chain = chain; @@ -809,22 +903,6 @@ export class Member { this.roles = roles; }; - /** - * Get the account. - * @returns {string} The account. - */ - getAccount():string { - return this.account; - }; - - /** - * Set the account. - * @param account The account. - */ - setAccount(account:string):void { - this.account = account; - }; - /** * Get the affiliation. * @returns {string} The affiliation. @@ -932,7 +1010,7 @@ export class Member { if (err) return cb(err); self.enrollment = enrollment; // Generate queryStateKey - self.enrollment.queryStateKey = self.chain.cryptoPrimitives.generateNonce(); + self.enrollment.queryStateKey = crypto.Crypto.generateNonce() // Save state self.saveState(function (err) { @@ -1083,7 +1161,6 @@ export class Member { if (state.name !== this.getName()) throw Error("name mismatch: '" + state.name + "' does not equal '" + this.getName() + "'"); this.name = state.name; this.roles = state.roles; - this.account = state.account; this.affiliation = state.affiliation; this.enrollmentSecret = state.enrollmentSecret; this.enrollment = state.enrollment; @@ -1098,7 +1175,6 @@ export class Member { let state = { name: self.name, roles: self.roles, - account: self.account, affiliation: self.affiliation, enrollmentSecret: self.enrollmentSecret, enrollment: self.enrollment @@ -1121,6 +1197,10 @@ export class TransactionContext extends events.EventEmitter { private binding: any; private tcert:TCert; private attrs:string[]; + private complete:boolean; + private timeoutId:any; + private waitTime:number; + private cevent:any; constructor(member:Member, tcert:TCert) { super(); @@ -1128,7 +1208,9 @@ export class TransactionContext extends events.EventEmitter { this.chain = member.getChain(); this.memberServices = this.chain.getMemberServices(); this.tcert = tcert; - this.nonce = this.chain.cryptoPrimitives.generateNonce(); + this.nonce = crypto.Crypto.generateNonce(); + this.complete = false; + this.timeoutId = null; } /** @@ -1353,7 +1435,47 @@ export class TransactionContext extends events.EventEmitter { }); self.getChain().sendTransaction(tx, emitter); } else { - self.getChain().sendTransaction(tx, self); + let txType = tx.pb.getType(); + let uuid = tx.pb.getTxid(); + let eh = self.getChain().getEventHub(); + // async deploy and invokes need to maintain + // tx context(completion status(self.complete)) + if ( txType == _fabricProto.Transaction.Type.CHAINCODE_DEPLOY) { + self.cevent = new EventDeployComplete(uuid, tx.chaincodeID); + self.waitTime = self.getChain().getDeployWaitTime(); + } else if ( txType == _fabricProto.Transaction.Type.CHAINCODE_INVOKE) { + self.cevent = new EventInvokeComplete("Tx "+uuid+" complete"); + self.waitTime = self.getChain().getInvokeWaitTime(); + } + eh.registerTxEvent(uuid, function (uuid) { + self.complete = true; + 
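// Commit notification received from the event hub for this txid: cancel the
// pending timeout, drop the registration, and emit the buffered 'complete' event.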
if (self.timeoutId) { + clearTimeout(self.timeoutId); + } + eh.unregisterTxEvent(uuid); + self.emit("complete", self.cevent); + }); + self.getChain().sendTransaction(tx, self); + // sync query can be skipped as response + // is processed and event generated in sendTransaction + // no timeout processing is necessary + if ( txType != _fabricProto.Transaction.Type.CHAINCODE_QUERY) { + debug("waiting %d seconds before emitting complete event", self.waitTime); + self.timeoutId = setTimeout(function() { + debug("timeout uuid=", uuid); + if(!self.complete) + // emit error if eventhub connect otherwise + // emit a complete event as done previously + if(eh.isconnected()) + self.emit("error","timed out waiting for transaction to complete"); + else + self.emit("complete",self.cevent); + else + eh.unregisterTxEvent(uuid); + }, + self.waitTime * 1000 + ); + } } } else { debug('Missing TCert...'); @@ -1404,7 +1526,7 @@ export class TransactionContext extends events.EventEmitter { var stateKey; if (transaction.pb.getType() == _fabricProto.Transaction.Type.CHAINCODE_DEPLOY) { // The request is for a deploy - stateKey = new Buffer(self.chain.cryptoPrimitives.aesKeyGen()); + stateKey = new Buffer(crypto.Crypto.aesKeyGen()); } else if (transaction.pb.getType() == _fabricProto.Transaction.Type.CHAINCODE_INVOKE ) { // The request is for an execute // Empty state key @@ -1481,7 +1603,7 @@ export class TransactionContext extends events.EventEmitter { ); debug('Decrypt Result [%s]', ct.toString('hex')); - return this.chain.cryptoPrimitives.aes256GCMDecrypt(key, ct); + return crypto.Crypto.aes256GCMDecrypt(key, ct); } /** @@ -1643,6 +1765,16 @@ export class TransactionContext extends events.EventEmitter { // Substitute the hashStrHash for the image name dockerFileContents = util.format(dockerFileContents, hash); + // Add the certificate path on the server, if it is being passed in + debug("type of request.certificatePath: " + typeof(request.certificatePath)); + debug("request.certificatePath: " + request.certificatePath); + if (request.certificatePath !== "" && request.certificatePath !== undefined) { + debug("Adding COPY certificate.pem command"); + + dockerFileContents = dockerFileContents + "\n" + "COPY certificate.pem %s"; + dockerFileContents = util.format(dockerFileContents, request.certificatePath); + } + // Create a Docker file with dockerFileContents let dockerFilePath = projDir + "/Dockerfile"; fs.writeFile(dockerFilePath, dockerFileContents, function(err) { @@ -1723,10 +1855,10 @@ export class TransactionContext extends events.EventEmitter { tx.setPayload(chaincodeDeploymentSpec.toBuffer()); // - // Set the transaction UUID + // Set the transaction ID // - tx.setTxid(sdk_util.GenerateUUID()); + tx.setTxid(hash); // // Set the transaction timestamp @@ -2039,16 +2171,23 @@ export class Peer { private peerClient:any; /** - * Constructor for a peer given the endpoint config for the peer. - * @param {string} url The URL of - * @param {Chain} The chain of which this peer is a member. + * Constructs a Peer given its endpoint configuration settings + * and returns the new Peer. + * @param {string} url The URL with format of "grpcs://host:port". + * @param {Chain} chain The chain of which this peer is a member. + * @param {GRPCOptions} optional GRPC options to use with the gRPC, + * protocol (that is, with TransportCredentials) including a root + * certificate file, in PEM format, and hostnameOverride. A certificate + * is required when using the grpcs (TLS) protocol. * @returns {Peer} The new peer. 
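* (For backwards compatibility a bare PEM string may also be passed in place of
* the options object; it is normalized internally by getPemFromOpts and getOptsFromOpts.)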
*/ - constructor(url:string, chain:Chain, pem:string) { + constructor(url:string, chain:Chain, opts:GRPCOptions) { this.url = url; this.chain = chain; + let pem = getPemFromOpts(opts); + opts = getOptsFromOpts(opts); this.ep = new Endpoint(url,pem); - this.peerClient = new _fabricProto.Peer(this.ep.addr, this.ep.creds); + this.peerClient = new _fabricProto.Peer(this.ep.addr, this.ep.creds, opts); } /** @@ -2103,7 +2242,6 @@ export class Peer { let event = new EventDeploySubmitted(response.msg.toString(), tx.chaincodeID); debug("EventDeploySubmitted event: %j", event); eventEmitter.emit("submitted", event); - self.waitForDeployComplete(eventEmitter,event); } } else { // Deploy completed with status "FAILURE" or "UNDEFINED" @@ -2117,7 +2255,6 @@ export class Peer { eventEmitter.emit("error", new EventTransactionError("the invoke response is missing the transaction UUID")); } else { eventEmitter.emit("submitted", new EventInvokeSubmitted(response.msg.toString())); - self.waitForInvokeComplete(eventEmitter); } } else { // Invoke completed with status "FAILURE" or "UNDEFINED" @@ -2139,43 +2276,6 @@ export class Peer { }); }; - /** - * TODO: Temporary hack to wait until the deploy event has hopefully completed. - * This does not detect if an error occurs in the peer or chaincode when deploying. - * When peer event listening is added to the SDK, this will be implemented correctly. - */ - private waitForDeployComplete(eventEmitter:events.EventEmitter, submitted:EventDeploySubmitted): void { - let waitTime = this.chain.getDeployWaitTime(); - debug("waiting %d seconds before emitting deploy complete event",waitTime); - setTimeout( - function() { - let event = new EventDeployComplete( - submitted.uuid, - submitted.chaincodeID, - "TODO: get actual results; waited "+waitTime+" seconds and assumed deploy was successful" - ); - eventEmitter.emit("complete",event); - }, - waitTime * 1000 - ); - } - - /** - * TODO: Temporary hack to wait until the deploy event has hopefully completed. - * This does not detect if an error occurs in the peer or chaincode when deploying. - * When peer event listening is added to the SDK, this will be implemented correctly. - */ - private waitForInvokeComplete(eventEmitter:events.EventEmitter): void { - let waitTime = this.chain.getInvokeWaitTime(); - debug("waiting %d seconds before emitting invoke complete event",waitTime); - setTimeout( - function() { - eventEmitter.emit("complete",new EventInvokeComplete("waited "+waitTime+" seconds and assumed invoke was successful")); - }, - waitTime * 1000 - ); - } - /** * Remove the peer from the chain. */ @@ -2196,15 +2296,24 @@ class Endpoint { constructor(url:string, pem?:string) { let purl = parseUrl(url); - let protocol = purl.protocol.toLowerCase(); + var protocol; + if (purl.protocol) { + protocol = purl.protocol.toLowerCase().slice(0,-1); + } if (protocol === 'grpc') { this.addr = purl.host; this.creds = grpc.credentials.createInsecure(); - } else if (protocol === 'grpcs') { + } + else if (protocol === 'grpcs') { this.addr = purl.host; this.creds = grpc.credentials.createSsl(new Buffer(pem)); - } else { - throw Error("invalid protocol: " + protocol); + } + else { + var error = new Error(); + error.name = "InvalidProtocol"; + error.message = "Invalid protocol: " + protocol + + ". URLs must begin with grpc:// or grpcs://" + throw error; } } } @@ -2225,16 +2334,14 @@ class MemberServicesImpl implements MemberServices { * @param config The config information required by this member services implementation. 
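* (In practice the constructor now takes the member services URL, of the form
* "grpc://host:port" or "grpcs://host:port", plus a GRPCOptions object or, for
* backwards compatibility, a bare PEM string.)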
* @returns {MemberServices} A MemberServices object. */ - constructor(url:string,pem:string) { + constructor(url:string,opts:GRPCOptions) { + var pem = getPemFromOpts(opts); + opts = getOptsFromOpts(opts); let ep = new Endpoint(url,pem); - var options = { - 'grpc.ssl_target_name_override' : 'tlsca', - 'grpc.default_authority': 'tlsca' - }; - this.ecaaClient = new _caProto.ECAA(ep.addr, ep.creds, options); - this.ecapClient = new _caProto.ECAP(ep.addr, ep.creds, options); - this.tcapClient = new _caProto.TCAP(ep.addr, ep.creds, options); - this.tlscapClient = new _caProto.TLSCAP(ep.addr, ep.creds, options); + this.ecaaClient = new _caProto.ECAA(ep.addr, ep.creds, opts); + this.ecapClient = new _caProto.ECAP(ep.addr, ep.creds, opts); + this.tcapClient = new _caProto.TCAP(ep.addr, ep.creds, opts); + this.tlscapClient = new _caProto.TLSCAP(ep.addr, ep.creds, opts); this.cryptoPrimitives = new crypto.Crypto(DEFAULT_HASH_ALGORITHM, DEFAULT_SECURITY_LEVEL); } @@ -2270,6 +2377,9 @@ class MemberServicesImpl implements MemberServices { this.cryptoPrimitives.setHashAlgorithm(hashAlgorithm); } + /** + * Get the crypto object. + */ getCrypto():crypto.Crypto { return this.cryptoPrimitives; } @@ -2285,10 +2395,25 @@ class MemberServicesImpl implements MemberServices { debug("MemberServicesImpl.register: req=%j", req); if (!req.enrollmentID) return cb(new Error("missing req.enrollmentID")); if (!registrar) return cb(new Error("chain registrar is not set")); + // Create proto request let protoReq = new _caProto.RegisterUserReq(); protoReq.setId({id:req.enrollmentID}); protoReq.setRole(rolesToMask(req.roles)); protoReq.setAffiliation(req.affiliation); + let attrs = req.attributes; + if (Array.isArray(attrs)) { + let pattrs = []; + for (var i = 0; i < attrs.length; i++) { + var attr = attrs[i]; + var pattr = new _caProto.Attribute(); + if (attr.name) pattr.setName(attr.name); + if (attr.value) pattr.setValue(attr.value); + if (attr.notBefore) pattr.setNotBefore(attr.notBefore); + if (attr.notAfter) pattr.setNotAfter(attr.notAfter); + pattrs.push(pattr); + } + protoReq.setAttributes(pattrs); + } // Create registrar info let protoRegistrar = new _caProto.Registrar(); protoRegistrar.setId({id:registrar.getName()}); @@ -2537,8 +2662,8 @@ class MemberServicesImpl implements MemberServices { } // end MemberServicesImpl -function newMemberServices(url,pem) { - return new MemberServicesImpl(url,pem); +function newMemberServices(url:string,opts:GRPCOptions) { + return new MemberServicesImpl(url,opts); } /** @@ -2619,10 +2744,6 @@ function isFunction(fcn:any):boolean { function parseUrl(url:string):any { // TODO: find ambient definition for url var purl = urlParser.parse(url, true); - var protocol = purl.protocol; - if (endsWith(protocol, ":")) { - purl.protocol = protocol.slice(0, -1); - } return purl; } @@ -2651,6 +2772,29 @@ function rolesToMask(roles?:string[]):number { return mask; } +// Get the PEM from the options +function getPemFromOpts(opts:any):string { + if (isObject(opts)) return opts.pem; + return opts; +} + +// Normalize opts +function getOptsFromOpts(opts:any):GRPCOptions { + if (isObject(opts)) { + delete opts.pem; + if (opts.hostnameOverride) { + opts['grpc.ssl_target_name_override'] = opts.hostnameOverride; + opts['grpc.default_authority'] = opts.hostnameOverride; + delete opts.hostnameOverride; + } + return opts; + } + if (isString(opts)) { + // backwards compatible to handle pem as opts + return { pem: opts }; + } +} + function endsWith(str:string, suffix:string) { return str.length >= 
suffix.length && str.substr(str.length - suffix.length) === suffix; }; @@ -2683,7 +2827,7 @@ export function newChain(name) { export function getChain(chainName, create) { let chain = _chains[chainName]; if (!chain && create) { - chain = newChain(name); + chain = newChain(chainName); } return chain; } @@ -2694,3 +2838,172 @@ export function getChain(chainName, create) { export function newFileKeyValStore(dir:string):KeyValStore { return new FileKeyValStore(dir); } + +/** + * The ChainCodeCBE is used internal to the EventHub to hold chaincode event registration callbacks. + */ +export class ChainCodeCBE { + // chaincode id + ccid: string; + // event name regex filter + eventNameFilter: RegExp; + // callback function to invoke on successful filter match + cb: Function; + constructor(ccid: string, eventNameFilter: string, cb: Function) { + this.ccid = ccid; + this.eventNameFilter = new RegExp(eventNameFilter); + this.cb = cb; + } +} + +/** + * The EventHub is used to distribute events from a specific event source(peer) + */ +export class EventHub { + // peer addr to connect to + private ep: Endpoint; + // grpc options + private opts: GRPCOptions; + // grpc events interface + private events: any; + // grpc event client interface + private client: any; + // grpc chat streaming interface + private call: any; + // hashtable of clients registered for chaincode events + private chaincodeRegistrants: any; + // set of clients registered for block events + private blockRegistrants: any; + // hashtable of clients registered for transactional events + private txRegistrants: any; + // fabric connection state of this eventhub + private connected: boolean; + constructor() { + this.chaincodeRegistrants = new HashTable(); + this.blockRegistrants = new Set(); + this.txRegistrants = new HashTable(); + this.ep = null; + this.connected = false; + } + + public setPeerAddr(peeraddr: string, opts?:GRPCOptions) { + let pem = getPemFromOpts(opts); + this.opts = getOptsFromOpts(opts); + this.ep = new Endpoint(peeraddr,pem); + } + + public isconnected() { + return this.connected; + } + + public connect() { + if (this.connected) return; + if (!this.ep) throw Error("Must set peer address before connecting."); + this.events = grpc.load(__dirname + "/protos/events.proto" ).protos; + this.client = new this.events.Events(this.ep.addr, this.ep.creds, this.opts); + this.call = this.client.chat(); + this.connected = true; + this.registerBlockEvent(this.txCallback); + + let eh = this; // for callback context + this.call.on('data', function(event) { + if ( event.Event == "chaincodeEvent" ) { + var cbtable = eh.chaincodeRegistrants.get(event.chaincodeEvent.chaincodeID); + if( !cbtable ) { + return; + } + cbtable.forEach(function (cbe) { + if ( cbe.eventNameFilter.test(event.chaincodeEvent.eventName)) { + cbe.cb(event.chaincodeEvent); + } + }); + } else if ( event.Event == "block") { + eh.blockRegistrants.forEach(function(cb){ + cb(event.block); + }); + } + }); + this.call.on('end', function() { + eh.call.end(); + // clean up Registrants - should app get notified? 
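// (Only the chaincode and block registrants are cleared here; transaction
// registrants are removed individually through unregisterTxEvent.)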
+ eh.chaincodeRegistrants.clear(); + eh.blockRegistrants.clear(); + }); + } + + public disconnect() { + if (!this.connected) return; + this.unregisterBlockEvent(this.txCallback); + this.call.end(); + this.connected = false; + } + + public registerChaincodeEvent(ccid: string, eventname: string, callback: Function): ChainCodeCBE { + if (!this.connected) return; + let cb = new ChainCodeCBE(ccid, eventname, callback); + let cbtable = this.chaincodeRegistrants.get(ccid); + if ( !cbtable ) { + cbtable = new Set(); + this.chaincodeRegistrants.put(ccid, cbtable); + cbtable.add(cb); + let register = { register: { events: [ { eventType: "CHAINCODE", chaincodeRegInfo:{ chaincodeID: ccid , eventName: "" }} ] }}; + this.call.write(register); + } else { + cbtable.add(cb); + } + return cb; + } + + public unregisterChaincodeEvent(cbe: ChainCodeCBE){ + if (!this.connected) return; + let cbtable = this.chaincodeRegistrants.get(cbe.ccid); + if ( !cbtable ) { + debug("No event registration for ccid %s ", cbe.ccid); + return; + } + cbtable.delete(cbe); + if( cbtable.size <= 0 ) { + var unregister = { unregister: { events: [ { eventType: "CHAINCODE", chaincodeRegInfo:{ chaincodeID: cbe.ccid, eventName: "" }} ] }}; + this.chaincodeRegistrants.remove(cbe.ccid); + this.call.write(unregister); + } + } + + public registerBlockEvent(callback:Function){ + if (!this.connected) return; + this.blockRegistrants.add(callback); + if(this.blockRegistrants.size==1) { + var register = { register: { events: [ { eventType: "BLOCK"} ] }}; + this.call.write(register); + } + } + + public unregisterBlockEvent(callback:Function){ + if (!this.connected) return; + if(this.blockRegistrants.size<=1) { + var unregister = { unregister: { events: [ { eventType: "BLOCK"} ] }}; + this.call.write(unregister); + } + this.blockRegistrants.delete(callback); + } + + public registerTxEvent(txid:string, callback:Function){ + debug("reg txid "+txid); + this.txRegistrants.put(txid, callback); + } + + public unregisterTxEvent(txid:string){ + this.txRegistrants.remove(txid); + } + + private txCallback = (event) => { + debug("txCallback event=%j", event); + var eh = this; + event.transactions.forEach(function(transaction){ + debug("transaction.txid="+transaction.txid); + var cb = eh.txRegistrants.get(transaction.txid); + if (cb) + cb(transaction.txid); + }); + } +} diff --git a/sdk/node/src/sdk_util.ts b/sdk/node/src/sdk_util.ts index cc3bdb6d251..0253e538b45 100644 --- a/sdk/node/src/sdk_util.ts +++ b/sdk/node/src/sdk_util.ts @@ -149,7 +149,8 @@ export function GenerateTarGz(src, dest, cb) { ".yaml", ".json", ".c", - ".h" + ".h", + ".pem" ]; // Create the pack stream specifying the ignore/filtering function diff --git a/sdk/node/test/fixtures/tlsca.cert b/sdk/node/test/fixtures/tlsca.cert new file mode 100644 index 00000000000..990911284de --- /dev/null +++ b/sdk/node/test/fixtures/tlsca.cert @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIBwTCCAUegAwIBAgIBATAKBggqhkjOPQQDAzApMQswCQYDVQQGEwJVUzEMMAoG +A1UEChMDSUJNMQwwCgYDVQQDEwNPQkMwHhcNMTYwMTIxMjI0OTUxWhcNMTYwNDIw +MjI0OTUxWjApMQswCQYDVQQGEwJVUzEMMAoGA1UEChMDSUJNMQwwCgYDVQQDEwNP +QkMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAR6YAoPOwMzIVi+P83V79I6BeIyJeaM +meqWbmwQsTRlKD6g0L0YvczQO2vp+DbxRN11okGq3O/ctcPzvPXvm7Mcbb3whgXW +RjbsX6wn25tF2/hU6fQsyQLPiJuNj/yxknSjQzBBMA4GA1UdDwEB/wQEAwIChDAP +BgNVHRMBAf8EBTADAQH/MA0GA1UdDgQGBAQBAgMEMA8GA1UdIwQIMAaABAECAwQw +CgYIKoZIzj0EAwMDaAAwZQIxAITGmq+x5N7Q1jrLt3QFRtTKsuNIosnlV4LR54l3 +yyDo17Ts0YLyC0pZQFd+GURSOQIwP/XAwoMcbJJtOVeW/UL2EOqmKA2ygmWX5kte +9Lngf550S6gPEWuDQOcY95B+x3eH 
+-----END CERTIFICATE----- diff --git a/sdk/node/test/unit/asset-mgmt-with-dynamic-roles.js b/sdk/node/test/unit/asset-mgmt-with-dynamic-roles.js new file mode 100644 index 00000000000..a6835505d66 --- /dev/null +++ b/sdk/node/test/unit/asset-mgmt-with-dynamic-roles.js @@ -0,0 +1,230 @@ +/** + * Copyright 2016 IBM + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Licensed Materials - Property of IBM + * © Copyright IBM Corp. 2016 + */ + +/** + * Simple asset management use case where authentication is performed + * with the help of TCerts only (use-case 1) or attributes only (use-case 2).*/ + +var hfc = require('../..'); +var test = require('tape'); +var util = require('util'); +var crypto = require('../../lib/crypto'); + +var chain, chaincodeID; +var chaincodeName = "mycc4"; +var deployer, alice, bob, assigner; +var aliceAccount = "12345-56789"; +var bobAccount = "23456-67890"; +var devMode = process.env.DEPLOY_MODE == 'dev'; + +// Create the chain and enroll users as deployer, assigner, and nonAssigner (who doesn't have privilege to assign. +function setup(cb) { + console.log("initializing ..."); + chain = hfc.newChain("testChain"); + chain.setKeyValStore(hfc.newFileKeyValStore("/tmp/keyValStore")); + chain.setMemberServicesUrl("grpc://localhost:7054"); + chain.addPeer("grpc://localhost:7051"); + if (devMode) chain.setDevMode(true); + console.log("enrolling deployer ..."); + chain.enroll("WebAppAdmin", "DJY27pEnl16d", function (err, user) { + if (err) return cb(err); + deployer = user; + chain.setRegistrar(user); + console.log("enrolling assigner2 ..."); + registerAndEnroll("assigner2",[{name:'role',value:'assigner'}], function(err,user) { + if (err) return cb(err); + assigner = user; + console.log("enrolling alice2 ..."); + registerAndEnroll("alice2",[{name:'role',value:'client'},{name:'account',value:aliceAccount}], function(err,user) { + if (err) return cb(err); + alice = user; + console.log("enrolling bob2 ..."); + registerAndEnroll("bob2",[{name:'role',value:'client'},{name:'account',value:bobAccount}], function(err,user) { + if (err) return cb(err); + bob = user; + return deploy(cb); + }); + }); + }); + }); +} + +// Deploy assetmgmt_with_roles with the name of the assigner role in the metadata +function deploy(cb) { + console.log("deploying with the role name 'assigner' in metadata ..."); + var req = { + fcn: "init", + args: [], + metadata: new Buffer("assigner") + }; + if (devMode) { + req.chaincodeName = chaincodeName; + } else { + req.chaincodePath = "github.com/asset_management_with_roles/"; + } + var tx = deployer.deploy(req); + tx.on('submitted', function (results) { + console.log("deploy submitted: %j", results); + }); + tx.on('complete', function (results) { + console.log("deploy complete: %j", results); + chaincodeID = results.chaincodeID; + console.log("chaincodeID:" + chaincodeID); + return cb(); + }); + tx.on('error', function (err) { + console.log("deploy error: %j", err.toString()); + return cb(err); + }); +} + +function assignOwner(user,owner,cb) 
{ + var req = { + chaincodeID: chaincodeID, + fcn: "assign", + args: ["MyAsset",owner], + attrs: ['role'] + }; + console.log("assign: invoking %j",req); + var tx = user.invoke(req); + tx.on('submitted', function (results) { + console.log("assign transaction ID: %j", results); + }); + tx.on('complete', function (results) { + console.log("assign invoke complete: %j", results); + return cb(); + }); + tx.on('error', function (err) { + console.log("assign invoke error: %j", err); + return cb(err); + }); +} + +// Check to see if the owner of the asset is +function checkOwner(user,ownerAccount,cb) { + var req = { + chaincodeID: chaincodeID, + fcn: "query", + args: ["MyAsset"] + }; + console.log("query: querying %j",req); + var tx = user.query(req); + tx.on('complete', function (results) { + var realOwner = results.result; + //console.log("realOwner: " + realOwner); + + if (ownerAccount == results.result) { + console.log("correct owner: %s",ownerAccount); + return cb(); + } else { + return cb(new Error(util.format("incorrect owner: expected=%s, real=%s",ownerAccount,realOwner))); + } + }); + tx.on('error', function (err) { + console.log("assign invoke error: %j", err); + return cb(err); + }); +} + +test('setup asset management with roles', function (t) { + t.plan(1); + + console.log("setup asset management with roles"); + + setup(function(err) { + if (err) { + t.fail("error: "+err.toString()); + // Exit the test script after a failure + process.exit(1); + } else { + t.pass("setup successful"); + } + }); +}); + +test('assign asset management with roles', function (t) { + t.plan(1); + alice.getUserCert(["role", "account"], function (err, aliceCert) { + if (err) { + fail(t, "Failed getting Application certificate for Alice."); + // Exit the test script after a failure + process.exit(1); + } + assignOwner(assigner, aliceCert.encode().toString('base64'), function(err) { + if (err) { + t.fail("error: "+err.toString()); + // Exit the test script after a failure + process.exit(1); + } else { + checkOwner(assigner, aliceAccount, function(err) { + if(err){ + t.fail("error: "+err.toString()); + // Exit the test script after a failure + process.exit(1); + } else { + t.pass("assign successful"); + } + }); + } + }); + }); +}); + +test('not assign asset management with roles', function (t) { + t.plan(1); + + bob.getUserCert(["role", "account"], function (err, bobCert) { + if (err) { + fail(t, "Failed getting Application certificate for Alice."); + // Exit the test script after a failure + process.exit(1); + } + assignOwner(alice, bobCert.encode().toString('base64'), function(err) { + if (err) { + t.fail("error: "+err.toString()); + // Exit the test script after a failure + process.exit(1); + } else { + checkOwner(alice, bobAccount, function(err) { + if(err){ + t.pass("assign successful"); + } else { + err = new Error ("this user should not have been allowed to assign"); + t.fail("error: "+err.toString()); + // Exit the test script after a failure + process.exit(1); + } + }); + } + }); + }); +}); + +function registerAndEnroll(name, attrs, cb) { + console.log("registerAndEnroll name=%s attrs=%j",name,attrs); + var registrationRequest = { + roles: [ 'client' ], + enrollmentID: name, + affiliation: "bank_a", + attributes: attrs + }; + chain.registerAndEnroll(registrationRequest,cb); +} + diff --git a/sdk/node/test/unit/chain-tests.js b/sdk/node/test/unit/chain-tests.js index 62925d4c644..debe3091550 100644 --- a/sdk/node/test/unit/chain-tests.js +++ b/sdk/node/test/unit/chain-tests.js @@ -48,13 +48,18 @@ if 
(fs.existsSync("tlsca.cert")) { chain.setMemberServicesUrl("grpc://localhost:7054"); } chain.addPeer("grpc://localhost:7051"); +chain.eventHubConnect("grpc://localhost:7053"); + +process.on('exit', function (){ + chain.eventHubDisconnect(); +}); // // Set the chaincode deployment mode to either developent mode (user runs chaincode) // or network mode (code package built and sent to the peer). // -var mode = process.env['DEPLOY_MODE']; +var mode = process.env['DEPLOY_MODE']; console.log("$DEPLOY_MODE: " + mode); if (mode === 'dev') { chain.setDevMode(true); @@ -104,7 +109,7 @@ var deltaAB = "1"; function getUser(name, cb) { chain.getUser(name, function (err, user) { if (err) return cb(err); - if (user.isEnrolled()) return cb(null,user); + if (user.isEnrolled()) return cb(null, user); // User is not enrolled yet, so perform both registration and enrollment // The chain registrar is already set inside 'Set chain registrar' test var registrationRequest = { @@ -128,6 +133,165 @@ function fail(t, msg, err) { t.end(err); } +// +// Test adding an invalid peer (omit protocol or use invalid protocol in URL) +// +test('Add invalid peer URLs to the chain', function (t) { + + //list of valid protocol prefixes to test + var prefixes = [ + "", + "grpcio://", + "http://", + " " + ]; + + t.plan(prefixes.length); + + var chain_test; + + //loop through the prefixes + prefixes.forEach(function (prefix, index) { + + chain_test = hfc.newChain("chain_test" + index); + + try { + chain_test.addPeer(prefix + "://localhost:7051", new Buffer(32)); + t.fail("Should not be able to add peer with URL starting with " + prefix); + } + catch (err) { + if (err.name === 'InvalidProtocol') { + t.pass("Returned 'InvalidProtocol' error for URL starting with " + prefix); + } + else { + t.fail("Failed to return 'InvalidProtocol' error for URL starting with " + prefix); + } + } + }) + +}); + + +// +// Test adding a valid peer (URL must start with grpc or grpcs) +// +test('Add valid peer URLs to the chain', function (t) { + + //list of valid protocol prefixes to test + var prefixes = [ + "grpc", + "GRPC", + "grpcs", + "GRPCS" + ]; + + t.plan(prefixes.length); + + var chain_test2; + + //loop through the prefixes + prefixes.forEach(function (prefix, index) { + + chain_test2 = hfc.newChain("chain_test2" + index); + + try { + chain_test2.addPeer(prefix + "://localhost:7051", + fs.readFileSync(__dirname + "/../fixtures/tlsca.cert")); + t.pass("Successfully added peer with URL starting with " + prefix + "://"); + } + catch (err) { + t.fail("Could not add peer to the chain with URL starting with " + prefix + "://"); + } + }) + +}); + +// +// Test adding multiple peers to the chain +// +test('Add multiple peers to the chain', function (t) { + + t.plan(1); + + var chain_multiple = hfc.newChain("chain_multiple"); + + var peers = [ + "grpc://localhost:7051", + "grpc://localhost:7052", + "grpc://localhost:7053", + "grpc://localhost:7054" + ]; + + peers.forEach(function (peer) { + + try { + chain_multiple.addPeer(peer); + } + catch (err) { + t.fail("Failed to add multiple peers to the chain"); + } + + }) + + //check to see we have the correct number of peers + if (chain_multiple.getPeers().length == peers.length) { + t.pass("Successfully added multiple peers to the chain(" + peers.length + + " expected | " + chain_multiple.getPeers().length + " found)"); + + } + else { + t.fail("Failed to add multiple peers to the chain(" + peers.length + + " expected | " + chain_multiple.getPeers().length + " found)"); + } +}); + +// +// Test adding duplicate 
peers to the chain +// +test('Catch duplicate peer added to chain', function (t) { + + t.plan(2); + + var chain_duplicate = hfc.newChain("chain_duplicate"); + + var peers = [ + "grpc://localhost:7051", + "grpc://localhost:7052", + "grpc://localhost:7053", + "grpc://localhost:7051" + ]; + + //we have one duplicate to set the expected value + var expected = peers.length - 1; + + peers.forEach(function (peer) { + + try { + chain_duplicate.addPeer(peer); + } + catch (err) { + if (err.name != "DuplicatePeer"){ + t.fail("Unexpected error " + err.toString()); + } + else { + t.pass("Expected error message 'DuplicatePeer' thrown"); + } + } + + }) + + //check to see we have the correct number of peers + if (chain_duplicate.getPeers().length == expected) { + t.pass("Duplicate peer not added to the chain(" + expected + + " expected | " + chain_duplicate.getPeers().length + " found)"); + + } + else { + t.fail("Failed to detect duplicate peer (" + expected + + " expected | " + chain_duplicate.getPeers().length + " found)"); + } +}); + // // Set Invalid security level and hash algorithm. // @@ -290,35 +454,35 @@ test('Register and enroll a new user', function (t) { // the missing parameter. // -test('Deploy with missing chaincodeName or chaincodePath', function(t) { - t.plan(1); - - // Construct the deploy request with a missing chaincodeName/chaincodePath - var deployRequest = { - // Function to trigger - fcn: "init", - // Arguments to the initializing function - args: ["a", initA, "b", initB] - }; - - // Trigger the deploy transaction - var deployTx = test_user_Member1.deploy(deployRequest); - - // Print the deploy results - deployTx.on('complete', function(results) { - // Deploy request completed successfully - console.log(util.format("deploy results: %j",results)); - // Set the testChaincodeID for subsequent tests - testChaincodeID = results.chaincodeID; - console.log("testChaincodeID:" + testChaincodeID); - t.fail(util.format("Successfully deployed chaincode: request=%j, response=%j", deployRequest, results)); - // Exit the test script after a failure - process.exit(1); - }); - deployTx.on('error', function(err) { - // Deploy request failed - t.pass(util.format("Failed to deploy chaincode: request=%j, error=%j",deployRequest,err)); - }); +test('Deploy with missing chaincodeName or chaincodePath', function (t) { + t.plan(1); + + // Construct the deploy request with a missing chaincodeName/chaincodePath + var deployRequest = { + // Function to trigger + fcn: "init", + // Arguments to the initializing function + args: ["a", initA, "b", initB] + }; + + // Trigger the deploy transaction + var deployTx = test_user_Member1.deploy(deployRequest); + + // Print the deploy results + deployTx.on('complete', function (results) { + // Deploy request completed successfully + console.log(util.format("deploy results: %j", results)); + // Set the testChaincodeID for subsequent tests + testChaincodeID = results.chaincodeID; + console.log("testChaincodeID:" + testChaincodeID); + t.fail(util.format("Successfully deployed chaincode: request=%j, response=%j", deployRequest, results)); + // Exit the test script after a failure + process.exit(1); + }); + deployTx.on('error', function (err) { + // Deploy request failed + t.pass(util.format("Failed to deploy chaincode: request=%j, error=%j", deployRequest, err)); + }); }); // @@ -327,43 +491,43 @@ test('Deploy with missing chaincodeName or chaincodePath', function(t) { // a local directory in the user's $GOPATH. 
// -test('Deploy a chaincode by enrolled user', function(t) { - t.plan(1); - - // Construct the deploy request - var deployRequest = { - // Function to trigger - fcn: "init", - // Arguments to the initializing function - args: ["a", initA, "b", initB] - }; - - if (mode === 'dev') { - // Name required for deploy in development mode - deployRequest.chaincodeName = testChaincodeName; - } else { - // Path (under $GOPATH) required for deploy in network mode - deployRequest.chaincodePath = testChaincodePath; - } - - // Trigger the deploy transaction - var deployTx = test_user_Member1.deploy(deployRequest); - - // Print the deploy results - deployTx.on('complete', function(results) { - // Deploy request completed successfully - console.log(util.format("deploy results: %j",results)); - // Set the testChaincodeID for subsequent tests - testChaincodeID = results.chaincodeID; - console.log("testChaincodeID:" + testChaincodeID); - t.pass(util.format("Successfully deployed chaincode: request=%j, response=%j", deployRequest, results)); - }); - deployTx.on('error', function(err) { - // Deploy request failed - t.fail(util.format("Failed to deploy chaincode: request=%j, error=%j",deployRequest,err)); - // Exit the test script after a failure - process.exit(1); - }); +test('Deploy a chaincode by enrolled user', function (t) { + t.plan(1); + + // Construct the deploy request + var deployRequest = { + // Function to trigger + fcn: "init", + // Arguments to the initializing function + args: ["a", initA, "b", initB] + }; + + if (mode === 'dev') { + // Name required for deploy in development mode + deployRequest.chaincodeName = testChaincodeName; + } else { + // Path (under $GOPATH) required for deploy in network mode + deployRequest.chaincodePath = testChaincodePath; + } + + // Trigger the deploy transaction + var deployTx = test_user_Member1.deploy(deployRequest); + + // Print the deploy results + deployTx.on('complete', function (results) { + // Deploy request completed successfully + console.log(util.format("deploy results: %j", results)); + // Set the testChaincodeID for subsequent tests + testChaincodeID = results.chaincodeID; + console.log("testChaincodeID:" + testChaincodeID); + t.pass(util.format("Successfully deployed chaincode: request=%j, response=%j", deployRequest, results)); + }); + deployTx.on('error', function (err) { + // Deploy request failed + t.fail(util.format("Failed to deploy chaincode: request=%j, error=%j", deployRequest, err)); + // Exit the test script after a failure + process.exit(1); + }); }); // @@ -465,10 +629,10 @@ test('Query existing chaincode state by enrolled user with batch size of 100', f t.pass(util.format("Successfully queried existing chaincode state: request=%j, response=%j, value=%s", queryRequest, results, results.result.toString())); }); queryTx.on('error', function (err) { - // Query failed - t.fail(util.format("Failed to query existing chaincode state: request=%j, error=%j", queryRequest, err)); - // Exit the test script after a failure - process.exit(1); + // Query failed + t.fail(util.format("Failed to query existing chaincode state: request=%j, error=%j", queryRequest, err)); + // Exit the test script after a failure + process.exit(1); }); }); @@ -503,7 +667,7 @@ test('Query non-existing chaincode state by enrolled user', function (t) { }); queryTx.on('error', function (err) { // Query failed - t.pass(util.format("Failed to query non-existing chaincode state: request=%j, error=%j",queryRequest,err)); + t.pass(util.format("Failed to query non-existing chaincode 
state: request=%j, error=%j", queryRequest, err)); }); }); @@ -538,7 +702,7 @@ test('Query non-existing chaincode function by enrolled user', function (t) { }); queryTx.on('error', function (err) { // Query failed - t.pass(util.format("Failed to query non-existing chaincode function: request=%j, error=%j",queryRequest,err)); + t.pass(util.format("Failed to query non-existing chaincode function: request=%j, error=%j", queryRequest, err)); }); }); @@ -565,7 +729,7 @@ test('Invoke with missing chaincodeID', function (t) { // Print the invoke results invokeTx.on('submitted', function (results) { // Invoke transaction submitted successfully - t.fail(util.format("Successfully submitted chaincode invoke transaction: request=%j, response=%j", invokeRequest,results)); + t.fail(util.format("Successfully submitted chaincode invoke transaction: request=%j, response=%j", invokeRequest, results)); // Exit the test script after a failure process.exit(1); }); @@ -599,7 +763,8 @@ test('Invoke a chaincode by enrolled user', function (t) { // Print the invoke results invokeTx.on('submitted', function (results) { // Invoke transaction submitted successfully - t.pass(util.format("Successfully submitted chaincode invoke transaction: request=%j, response=%j", invokeRequest,results)); + t.pass(util.format("Successfully submitted chaincode invoke transaction: request=%j, response=%j", invokeRequest, results)); + chain.eventHubDisconnect(); }); invokeTx.on('error', function (err) { // Invoke transaction submission failed diff --git a/sdk/node/test/unit/event-tests.js b/sdk/node/test/unit/event-tests.js new file mode 100644 index 00000000000..d3c845f39ac --- /dev/null +++ b/sdk/node/test/unit/event-tests.js @@ -0,0 +1,516 @@ +/** + * Copyright London Stock Exchange 2016 All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +var hfc = require('../..'); +var test = require('tape'); +var util = require('util'); +var fs = require('fs'); + +// +// Create a test chain +// + +var chain = hfc.newChain("testChain"); + +// +// Configure the test chain +// +// Set the directory for the local file-based key value store, point to the +// address of the membership service, and add an associated peer node. +// +// If the "tlsca.cert" file exists then the client-sdk will +// try to connect to the member services using TLS. +// The "tlsca.cert" is supposed to contain the root certificate (in PEM format) +// to be used to authenticate the member services certificate. 
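// An event hub connection to the peer event source (port 7053 below) is also
// opened, and it is disconnected again in the process 'exit' handler.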
+// + +chain.setKeyValStore(hfc.newFileKeyValStore('/tmp/keyValStore')); +if (fs.existsSync("tlsca.cert")) { + chain.setMemberServicesUrl("grpcs://localhost:7054", fs.readFileSync('tlsca.cert')); +} else { + chain.setMemberServicesUrl("grpc://localhost:7054"); +} +chain.addPeer("grpc://localhost:7051"); +chain.eventHubConnect("grpc://localhost:7053"); + +process.on('exit', function (){ + chain.eventHubDisconnect(); +}); + +// +// Set the chaincode deployment mode to either developent mode (user runs chaincode) +// or network mode (code package built and sent to the peer). +// + +var mode = process.env['DEPLOY_MODE']; +console.log("$DEPLOY_MODE: " + mode); +if (mode === 'dev') { + chain.setDevMode(true); +} else { + chain.setDevMode(false); +} + +// +// Configure test users +// +// Set the values required to register a user with the certificate authority. +// + +test_user1 = { + name: "WebApp_user1", + role: 1, // Client + affiliation: "bank_a" +}; + +// +// Declare variables to store the test user Member objects returned after +// registration and enrollment as they will be used across multiple tests. +// + +var test_user_Member1; + +// +// Declare test variables that will be used to store chaincode values used +// across multiple tests. +// + +// Path to the local directory containing the chaincode project under $GOPATH +var testChaincodePath = "github.com/eventsender/"; + +// Chaincode hash that will be filled in by the deployment operation or +// chaincode name that will be referenced in development mode. +var testChaincodeName = "mycc5"; + +// testChaincodeID will store the chaincode ID value after deployment. +var testChaincodeID; + +function getUser(name, cb) { + chain.getUser(name, function (err, user) { + if (err) return cb(err); + if (user.isEnrolled()) return cb(null, user); + // User is not enrolled yet, so perform both registration and enrollment + // The chain registrar is already set inside 'Set chain registrar' test + var registrationRequest = { + enrollmentID: name, + affiliation: "bank_a" + }; + user.registerAndEnroll(registrationRequest, function (err) { + if (err) cb(err, null) + cb(null, user) + }); + }); +} + +// +// Enroll the WebAppAdmin member and set as registrar then register +// and enroll new user with certificate authority +// This test is a prerequisite to further tests +// + +test('Set up chain prerequisites', function (t) { + t.plan(5); + + // Get the WebAppAdmin member + chain.getMember("WebAppAdmin", function (err, WebAppAdmin) { + if (err) { + t.fail("Failed to get WebAppAdmin member " + " ---> " + err); + t.end(err); + process.exit(1); + } else { + t.pass("Successfully got WebAppAdmin member" /*+ " ---> " + JSON.stringify(crypto)*/); + + // Enroll the WebAppAdmin member with the certificate authority using + // the one time password hard coded inside the membersrvc.yaml. + pw = "DJY27pEnl16d"; + WebAppAdmin.enroll(pw, function (err, crypto) { + if (err) { + t.fail("Failed to enroll WebAppAdmin member " + " ---> " + err); + t.end(err); + process.exit(1); + } else { + t.pass("Successfully enrolled WebAppAdmin member" /*+ " ---> " + JSON.stringify(crypto)*/); + + // Confirm that the WebAppAdmin token has been created in the key value store + path = chain.getKeyValStore().dir + "/member." 
+ WebAppAdmin.getName(); + + fs.exists(path, function (exists) { + if (exists) { + t.pass("Successfully stored client token" /*+ " ---> " + WebAppAdmin.getName()*/); + } else { + t.fail("Failed to store client token for " + WebAppAdmin.getName() + " ---> " + err); + t.end(err); + // Exit the test script after a failure + process.exit(1); + } + chain.setRegistrar(WebAppAdmin); + // Register and enroll test_user + getUser(test_user1.name, function (err, user) { + if (err) { + t.fail("Failed to get " + test_user1.name + " ---> ", err); + t.end(err); + // Exit the test script after a failure + process.exit(1); + } else { + test_user_Member1 = user; + + t.pass("Successfully registered and enrolled " + test_user_Member1.getName()); + + // Confirm that the user token has been created in the key value store + path = chain.getKeyValStore().dir + "/member." + test_user1.name; + fs.exists(path, function (exists) { + if (exists) { + t.pass("Successfully stored client token" /*+ " ---> " + test_user1.name*/); + } else { + t.fail("Failed to store client token for " + test_user1.name + " ---> " + err); + t.end(err); + // Exit the test script after a failure + process.exit(1); + } + }); + } + }); + }); + } + }); + } + }); +}); + +// +// Create and issue a chaincode deploy request by the test user, who was +// registered and enrolled in the UT above. Deploy a testing chaincode from +// a local directory in the user's $GOPATH. +// + +test('Deploy a chaincode by enrolled user', function (t) { + t.plan(1); + + // Construct the deploy request + var deployRequest = { + // Function to trigger + fcn: "init", + // Arguments to the initializing function + args: [] + }; + + if (mode === 'dev') { + // Name required for deploy in development mode + deployRequest.chaincodeName = testChaincodeName; + } else { + // Path (under $GOPATH) required for deploy in network mode + deployRequest.chaincodePath = testChaincodePath; + } + + // Trigger the deploy transaction + var deployTx = test_user_Member1.deploy(deployRequest); + + // the deploy complete is triggered as a result of a fabric deploy + // complete event automatically when a event source is connected + deployTx.on('complete', function (results) { + // Deploy request completed successfully + console.log(util.format("deploy results: %j", results)); + // Set the testChaincodeID for subsequent tests + testChaincodeID = results.chaincodeID; + console.log("testChaincodeID:" + testChaincodeID); + t.pass(util.format("Successfully deployed chaincode: request=%j, response=%j", deployRequest, results)); + }); + deployTx.on('error', function (err) { + // Deploy request failed + t.fail(util.format("Failed to deploy chaincode: request=%j, error=%j", deployRequest, err)); + // Exit the test script after a failure + process.exit(1); + }); +}); + +// +// Issue a chaincode invoke to generate event and listen for the event +// by registering with chaincode id and event name +// + +test('Invoke chaincode and have it generate an event', function (t) { + t.plan(2); + + var evtstring = "event-test"; + // Construct the invoke request + var invokeRequest = { + // Name (hash) required for invoke + chaincodeID: testChaincodeID, + // Function to trigger + fcn: "invoke", + // Parameters for the invoke function + args: [evtstring] + }; + var eh = chain.getEventHub(); + var duration = chain.getInvokeWaitTime() * 1000; + var timedout = true; + var timeoutId = null; + + // register for chaincode event + var regid = eh.registerChaincodeEvent(testChaincodeID, "^evtsender$", function(event) { + 
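// Chaincode event received: clear the timeout and check that the payload
// matches the string passed to the invoke.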
timedout = false; + if (timeoutId) { + clearTimeout(timeoutId); + } + t.equal(event.payload.toString(), "Event 0," + evtstring, "Successfully received expected chaincode event payload"); + eh.unregisterChaincodeEvent(regid); + }); + // Trigger the invoke transaction + var invokeTx = test_user_Member1.invoke(invokeRequest); + // set timout on event sent by chaincode invoke + timeoutId = setTimeout(function() { + if(timedout) { + eh.unregisterChaincodeEvent(regid); + t.fail("Failed to receive chaincode event"); + process.exit(1); + } + }, + duration); + + // Print the invoke results + invokeTx.on('complete', function (results) { + // Invoke transaction submitted successfully + t.pass(util.format("Successfully completed chaincode invoke transaction: request=%j, response=%j", invokeRequest, results)); + }); + invokeTx.on('error', function (err) { + // Invoke transaction submission failed + t.fail(util.format("Failed to submit chaincode invoke transaction: request=%j, error=%j", invokeRequest, err)); + // Exit the test script after a failure + process.exit(1); + }); +}); +// +// Issue a chaincode invoke to generate event and listen for the event +// on 2 registrations +// + +test('Invoke chaincode, have it generate an event, and receive event on 2 registrations', function (t) { + t.plan(3); + + var evtstring = "event-test"; + // Construct the invoke request + var invokeRequest = { + // Name (hash) required for invoke + chaincodeID: testChaincodeID, + // Function to trigger + fcn: "invoke", + // Parameters for the invoke function + args: [evtstring] + }; + var eh = chain.getEventHub(); + var duration = chain.getInvokeWaitTime() * 1000; + var timedout = true; + var timeoutId = null; + var eventcount = 0; + + // register for chaincode event + var regid1 = eh.registerChaincodeEvent(testChaincodeID, "^evtsender$", function(event) { + eventcount++; + if (eventcount > 1) { + if (timeoutId) { + clearTimeout(timeoutId); + } + } + t.equal(event.payload.toString(), "Event 1," + evtstring, "Successfully received expected chaincode event payload"); + eh.unregisterChaincodeEvent(regid1); + }); + // register for chaincode event + var regid2 = eh.registerChaincodeEvent(testChaincodeID, "^evtsender$", function(event) { + eventcount++; + if (eventcount > 1) { + if (timeoutId) { + clearTimeout(timeoutId); + } + } + t.equal(event.payload.toString(), "Event 1," + evtstring, "Successfully received expected chaincode event payload"); + eh.unregisterChaincodeEvent(regid2); + }); + // Trigger the invoke transaction + var invokeTx = test_user_Member1.invoke(invokeRequest); + // set timout on event sent by chaincode invoke + timeoutId = setTimeout(function() { + if(eventcount > 1) { + eh.unregisterChaincodeEvent(regid1); + eh.unregisterChaincodeEvent(regid2); + t.fail("Failed to receive chaincode event"); + process.exit(1); + } + }, + duration); + + // Print the invoke results + invokeTx.on('complete', function (results) { + // Invoke transaction submitted successfully + t.pass(util.format("Successfully completed chaincode invoke transaction: request=%j, response=%j", invokeRequest, results)); + }); + invokeTx.on('error', function (err) { + // Invoke transaction submission failed + t.fail(util.format("Failed to submit chaincode invoke transaction: request=%j, error=%j", invokeRequest, err)); + // Exit the test script after a failure + process.exit(1); + }); +}); + +// +// Issue a chaincode invoke to generate event and listen for the event +// by registering with chaincode id and wildcarded event name +// + 
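// (ChainCodeCBE compiles the event name filter into a RegExp, so the ".*"
// registration below matches any event name emitted by this chaincode.)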
+test('Generate chaincode event and receive it with wildcard', function (t) { + t.plan(2); + + var evtstring = "event-test"; + // Construct the invoke request + var invokeRequest = { + // Name (hash) required for invoke + chaincodeID: testChaincodeID, + // Function to trigger + fcn: "invoke", + // Parameters for the invoke function + args: [evtstring] + }; + var eh = chain.getEventHub(); + var duration = chain.getInvokeWaitTime() * 1000; + var timedout = true; + var timeoutId = null; + + // register for chaincode event with wildcard event name + var regid = eh.registerChaincodeEvent(testChaincodeID, ".*", function(event) { + timedout = false; + if (timeoutId) { + clearTimeout(timeoutId); + } + t.equal(event.payload.toString(), "Event 2," + evtstring, "Successfully received expected chaincode event payload"); + eh.unregisterChaincodeEvent(regid); + }); + // Trigger the invoke transaction + var invokeTx = test_user_Member1.invoke(invokeRequest); + // set timout on event sent by chaincode invoke + timeoutId = setTimeout(function() { + if(timedout) { + eh.unregisterChaincodeEvent(regid); + t.fail("Failed to receive chaincode event"); + process.exit(1); + } + }, + duration); + + // Print the invoke results + invokeTx.on('complete', function (results) { + // Invoke transaction submitted successfully + t.pass(util.format("Successfully completed chaincode invoke transaction: request=%j, response=%j", invokeRequest, results)); + }); + invokeTx.on('error', function (err) { + // Invoke transaction submission failed + t.fail(util.format("Failed to submit chaincode invoke transaction: request=%j, error=%j", invokeRequest, err)); + // Exit the test script after a failure + process.exit(1); + }); +}); + +// +// Issue a chaincode invoke to generate event and listen for the event +// by registering with chaincode id and a bogus event name +// + +test('Generate an event that fails to be received', function (t) { + t.plan(2); + + var evtstring = "event-test"; + // Construct the invoke request + var invokeRequest = { + // Name (hash) required for invoke + chaincodeID: testChaincodeID, + // Function to trigger + fcn: "invoke", + // Parameters for the invoke function + args: [evtstring] + }; + var eh = chain.getEventHub(); + var duration = chain.getInvokeWaitTime() * 1000; + var timedout = true; + var timeoutId = null; + + // register for chaincode event with bogus event name + var regid = eh.registerChaincodeEvent(testChaincodeID, "bogus", function(event) { + timedout = false; + if (timeoutId) { + clearTimeout(timeoutId); + } + t.fail("Received chaincode event from bogus registration"); + eh.unregisterChaincodeEvent(regid); + process.exit(1); + }); + // Trigger the invoke transaction + var invokeTx = test_user_Member1.invoke(invokeRequest); + // set timout on event sent by chaincode invoke + timeoutId = setTimeout(function() { + if(timedout) { + eh.unregisterChaincodeEvent(regid); + t.pass("Failed to receive chaincode event"); + } + }, + duration); + + // Print the invoke results + invokeTx.on('complete', function (results) { + // Invoke transaction submitted successfully + t.pass(util.format("Successfully completed chaincode invoke transaction: request=%j, response=%j", invokeRequest, results)); + }); + invokeTx.on('error', function (err) { + // Invoke transaction submission failed + t.fail(util.format("Failed to submit chaincode invoke transaction: request=%j, error=%j", invokeRequest, err)); + // Exit the test script after a failure + process.exit(1); + }); +}); + +// +// +// Create and issue a chaincode 
query request by the test user, who was +// registered and enrolled in the UT above. Query a chaincode for +// number of events generated. +// + +test('Query chaincode state for number of events sent', function (t) { + t.plan(1); + + // Construct the query request + var queryRequest = { + // Name (hash) required for query + chaincodeID: testChaincodeID, + // Function to trigger + fcn: "query", + // Existing state variable to retrieve + args: [""] + }; + + // Trigger the query transaction + var queryTx = test_user_Member1.query(queryRequest); + + // Print the query results + queryTx.on('complete', function (results) { + var result = JSON.parse(results.result.toString()); + var count = parseInt(result.NoEvents); + t.equal(count, 4, "Successfully queried correct number of events generated."); + chain.eventHubDisconnect(); + }); + queryTx.on('error', function (err) { + // Query failed + t.fail(util.format("Failed to query chaincode state: request=%j, error=%j", queryRequest, err)); + t.end(err); + }); +}); + diff --git a/sdk/node/test/unit/registrar.js b/sdk/node/test/unit/registrar.js index 61489f98954..726b3f33c42 100644 --- a/sdk/node/test/unit/registrar.js +++ b/sdk/node/test/unit/registrar.js @@ -23,7 +23,7 @@ var test = require('tape'); var util = require('util'); var fs = require('fs'); -var keyValStorePath = "/tmp/keyValStore" +var keyValStorePath = "/tmp/keyValStore"; var keyValStorePath2 = keyValStorePath + "2"; // @@ -56,60 +56,97 @@ test('enroll again', function (t) { }); }); -// The registrar test +var chain,admin,webAdmin,webUser; + +// STEP1: Init the chain and enroll admin function registrarTest(cb) { - console.log("testRegistrar"); + console.log("registrarTest: STEP 1"); // // Create and configure the test chain // - var chain = hfc.newChain("testChain"); - var expect=""; - var found=""; - + chain = hfc.newChain("testChain"); chain.setKeyValStore(hfc.newFileKeyValStore(keyValStorePath)); chain.setMemberServicesUrl("grpc://localhost:7054"); - chain.enroll("admin", "Xurw3yU9zI0l", function (err, admin) { + chain.enroll("admin", "Xurw3yU9zI0l", function (err, user) { if (err) return cb(err); + admin = user; chain.setRegistrar(admin); - // Register and enroll webAdmin - registerAndEnroll("webAdmin", "client", {roles:['client']}, chain, function(err,webAdmin) { - if (err) return cb(err); - chain.setRegistrar(webAdmin); - registerAndEnroll("webUser", "client", null, chain, function(err, webUser) { - if (err) return cb(err); - registerAndEnroll("auditor", "auditor", null, chain, function(err, auditor) { - if (!err) return cb(err); - expect="webAdmin may not register member of type auditor"; - found = (err.toString()).match(expect); - if (!(found==expect)) cb(err); - registerAndEnroll("validator", "validator", null, chain, function(err, validator) { - if (!err) return cb(err); - expect="webAdmin may not register member of type validator"; - found = (err.toString()).match(expect); - if (!(found==expect)) cb(err); - chain.setRegistrar(webUser); - registerAndEnroll("webUser2", "client", null, chain, function(err) { - if (!err) return cb(Error("webUser should not be allowed to register a client")); - expect="webUser may not register member of type client"; - found = (err.toString()).match(expect); - if (!(found==expect)) cb(err); - return cb(); - }); - }); - }); - }); - }); + registrarTestStep2(cb); + }); +} + +// STEP 2: Register and enroll webAdmin and set as register +function registrarTestStep2(cb) { + console.log("registrarTest: STEP 2"); + registerAndEnroll("webAdmin", "client", 
null, {roles:['client']}, chain, function(err,user) { + if (err) return cb(err); + webAdmin = user; + chain.setRegistrar(webAdmin); + registrarTestStep3(cb); }); } -// Register and enroll user 'name' with role 'r' with registrar info 'registrar' for chain 'chain' -function registerAndEnroll(name, r, registrar, chain, cb) { +// STEP 3: Register webUser with an attribute" +function registrarTestStep3(cb) { + console.log("registrarTest: STEP 3"); + var attrs; + attrs = [{name:'foo',value:'bar'}]; + registerAndEnroll("webUser", "client", attrs, null, chain, function(err, user) { + if (err) return cb(err); + webUser = user; + registrarTestStep4(cb); + }); +} + +// STEP 4: Negative test attempting to register an auditor type by webAdmin +function registrarTestStep4(cb) { + console.log("registrarTest: STEP 4"); + registerAndEnroll("auditorUser", "auditor", null, null, chain, function(err) { + if (!err) return cb(Error("webAdmin should not have been allowed to register member of type auditor")); + err = checkErr(err,"webAdmin may not register member of type auditor"); + if (err) return cb(err); + registrarTestStep5(cb); + }); +} + +// STEP 5: Negative test attempting to register a validator type by webAdmin +function registrarTestStep5(cb) { + console.log("registrarTest: STEP 5"); + registerAndEnroll("validatorUser", "validator", null, null, chain, function(err) { + if (!err) return cb(Error("webAdmin should not have been allowed to register member of type validator")); + err = checkErr(err,"webAdmin may not register member of type validator"); + if (err) return cb(err); + registrarTestStep6(cb); + }); +} + +// STEP 6: Negative test attempting to register a client type by webUser +function registrarTestStep6(cb) { + console.log("registrarTest: STEP 6"); + chain.setRegistrar(webUser); + registerAndEnroll("webUser2", "client", null, webUser, chain, function(err) { + if (!err) return cb(Error("webUser should not be allowed to register a member of type client")); + err = checkErr(err,"webUser may not register member of type client"); + if (err) return cb(err); + registrarTestDone(cb); + }); +} + +// Final step: success +function registrarTestDone(cb) { + console.log("registrarTest: Done"); + return cb(); +} + +// Register and enroll user 'name' with role 'role' with registrar info 'registrar' for chain 'chain' +function registerAndEnroll(name, role, attributes, registrar, chain, cb) { console.log("registerAndEnroll %s",name); // User is not enrolled yet, so perform both registration and enrollment var registrationRequest = { - roles: [ r ], + roles: [ role ], enrollmentID: name, affiliation: "bank_a", + attributes: attributes, registrar: registrar }; chain.registerAndEnroll(registrationRequest,cb); @@ -150,6 +187,16 @@ function rmdir(path) { } } +function checkErr(err,expect) { + err = err.toString(); + var found = err.match(expect); + if (!found || found.length === 0) { + err = new Error(util.format("Incorrect error: expecting '%s' but found '%s'",expect,err)); + return err; + } + return null; +} + function pass(t, msg) { t.pass("Success: [" + msg + "]"); t.end(); diff --git a/sdk/node/test/unit/test-util.js b/sdk/node/test/unit/test-util.js new file mode 100644 index 00000000000..f5d494e7010 --- /dev/null +++ b/sdk/node/test/unit/test-util.js @@ -0,0 +1,111 @@ +/** + * Copyright 2016 IBM + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +var hfc = require('hfc'); +var util = require('util'); +var fs = require('fs'); + +//Set defaults, if not set +var deployMode = process.env.SDK_DEPLOY_MODE + ? process.env.SDK_DEPLOY_MODE + : "net" ; +var keyStore = process.env.SDK_KEYSTORE + ? process.env.SDK_KEYSTORE + : "/tmp/keyValStore" ; +var caCert = process.env.SDK_CA_CERT_FILE + ? process.env.SDK_CA_CERT_FILE + : "tlsca.cert" ; +var caCertHost = process.env.SDK_CA_CERT_HOST + ? process.env.SDK_CA_CERT_HOST + : "" ; +var caAddr = process.env.SDK_MEMBERSRVC_ADDRESS + ? process.env.SDK_MEMBERSRVC_ADDRESS + : "localhost:7054" ; +var peerAddr0 = process.env.SDK_PEER_ADDRESS + ? process.env.SDK_PEER_ADDRESS + : "localhost:7051" ; +var tlsOn = ( process.env.SDK_TLS == null ) + ? Boolean(false) + : Boolean(parseInt(process.env.SDK_TLS)); +var deployWait = process.env.SDK_DEPLOYWAIT + ? process.env.SDK_DEPLOYWAIT + : 20; +var invokeWait = process.env.SDK_INVOKEWAIT + ? process.env.SDK_INVOKEWAIT + : 5; +var ciphers = process.env.GRPC_SSL_CIPHER_SUITES + +console.log("deployMode :"+deployMode ); +console.log("keyStore :"+keyStore ); +console.log("caCert :"+caCert ); +console.log("caAddr :"+caAddr ); +console.log("peerAddr0 :"+peerAddr0 ); +console.log("tlsOn :"+tlsOn ); +console.log("deployWait :"+deployWait ); +console.log("invokeWait :"+invokeWait ); +console.log("ciphers :"+ciphers ); + +function getTestChain(name) { + name = name || "testChain"; + var chain = hfc.newChain(name); + + chain.setKeyValStore(hfc.newFileKeyValStore(keyStore)); + if (tlsOn) { + if (fs.existsSync(caCert)) { + var pem = fs.readFileSync(caCert); + if (caCertHost) { var grpcOpts={ pem:pem, hostnameOverride:'tlsca' } } + else { var grpcOpts={ pem:pem } }; + + console.log("Setting membersrvc address to grpcs://" + caAddr); + console.log("Setting peer address to grpcs://" + peerAddr0); + console.log("Setting cert to " + caCert); + + chain.setMemberServicesUrl("grpcs://" + caAddr, { pem:pem, hostnameOverride:'tlsca' } ); + chain.addPeer("grpcs://" + peerAddr0, { pem:pem, hostnameOverride:'tlsca' } ); + } else { + console.log("TLS was requested but " + caCert + " not found.") + process.exit(1) + } + } else { + console.log("Setting membersrvc address to grpc://" + caAddr); + console.log("Setting peer address to grpc://" + peerAddr0); + chain.setMemberServicesUrl("grpc://" + caAddr); + chain.addPeer("grpc://" + peerAddr0); + } + // + // Set the chaincode deployment mode to either developent mode (user runs chaincode) + // or network mode (code package built and sent to the peer). 
+ console.log("$SDK_DEPLOY_MODE: " + deployMode); + if (deployMode === 'dev') { + chain.setDevMode(true); + } else { + chain.setDevMode(false); + } + chain.setDeployWaitTime(parseInt(deployWait)); + chain.setInvokeWaitTime(parseInt(invokeWait)); + return chain; +} + +exports.getTestChain = getTestChain; +exports.deployMode = deployMode +exports.keyStore = keyStore +exports.caCert = caCert +exports.caAddr = caAddr +exports.peerAddr0 = peerAddr0 +exports.tlsOn = tlsOn +exports.deployWait = deployWait +exports.invokeWait = invokeWait +exports.ciphers = ciphers \ No newline at end of file diff --git a/tools/busywork/bin/busy b/tools/busywork/bin/busy index 242c639a6c1..c7d87091d09 100755 --- a/tools/busywork/bin/busy +++ b/tools/busywork/bin/busy @@ -1,13 +1,13 @@ #!/usr/bin/tclsh # Copyright IBM Corp. 2016. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -19,18 +19,18 @@ set usage { The 'busy' script runs commands on one or more peers in a busywork network, printing the salient portion of the response on stdout. 'busy' can be seen as -an easier-to-use version of the Hyperledger fabric command-line-interface -(CLI) in that 'busy' takes care of various kinds of bookkeeping and -boilerplate code behind the scenes. 'busy' is probably best used as a tool for -small scipts, or for issuing a few interactive commands to a peer -network. Alhough complex test scripts could be implemented entirely as 'busy' -calls, the performance of a test implemented this way would likely suffer from -the process-creation overhead of each 'busy' invocation. +an easier-to-use, enhanced version of the Hyperledger fabric +command-line-interface (CLI) in that 'busy' takes care of various kinds of +bookkeeping and boilerplate code behind the scenes. 'busy' is probably best +used as a tool for small scipts, or for issuing a few interactive commands to +a peer network. Alhough complex test scripts could be implemented entirely as +'busy' calls, the performance of a test implemented this way would likely +suffer from the process-creation overhead of each 'busy' invocation. If the 'busy' command targets a single peer (see below) then the response from -that peer is printed. If multiple peers are targeted, then the results are -collected and printed as a JSON object, where each result is keyed by the peer -name, e.g., +or for that peer is printed. If multiple peers are targeted, then the results +are collected and printed as a JSON object, where each result is keyed by the +peer name, e.g., { "vp0" : , @@ -39,8 +39,8 @@ name, e.g., "vp" : response> } -Use -json to force results from operations on a single peer to print as a JSON -object. +Include the -json option to force results from operations on a single peer to +print as a JSON object. 'busy' is only supported for peer networks described by a 'network' file in the BUSYWORK_HOME directory. 
The to target are named by the peer IDs @@ -76,6 +76,9 @@ The following command and argument forms are supported: ping + pid + ps + The 'network' and 'chaincodes' commands simply print the current 'network' and 'chaincodes' files respectively from the implied $BUSYWORK_HOME. @@ -110,6 +113,17 @@ queries fail. If the ping query succeeds then the output of the ping queries is returned. This function currently assumes that the chaincode implments a 'ping' query function with no parameters. +The 'pid' command simply returns the PID of each of the . + +The 'ps' command is used to obtain 'ps' information from one or more +peers. For each peer implied by the specification, the result returned +is the result from executing 'ps' as follows, where is the PID of each +of the : + + ps -p -ww --noheader -o + +Note that leading/trailing whitespace is removed from the result. + Examples: busy chaincodes @@ -124,6 +138,9 @@ Examples: busy ping "*" cc2 + busy pid vp0 + busy ps vp0 etime,cputime + Optional arguments, with defaults after the colon: -h | -help | --help : N/A @@ -149,12 +166,12 @@ Optional arguments, with defaults after the colon: The -waitFor option is supported for the 'deploy', 'invoke' and 'ping' commands only. The semantics are explained below in the section headed - "Semantics of -waitFor" + "Semantics of -waitFor" -json : See below Select -json to force even single-peer operations to print as a JSON - object, rather than simply as a value. This opiton is ignored for the + object, rather than simply as a value. This option is ignored for the 'chaincodes' and 'network' commands. @@ -191,6 +208,14 @@ proc singletonCommand {cmd} { } +proc fixedArgs {cmd nArgs} { + + if {[llength [parms args]] != $nArgs} { + errorExit "The '$cmd' command expects $nArgs arguments." 
+ } +} + + proc chaincodes {} { singletonCommand chaincodes @@ -294,7 +319,7 @@ proc invoke {} { "Deployed IDs are $a(ids)" } set name $a($ccId.name) - + if {![null [parms waitFor]]} { set heights \ [mapeach address [parms restAddresses] { @@ -322,7 +347,7 @@ proc invoke {} { } } - + proc query {} { waitForNotOK query @@ -344,7 +369,7 @@ proc query {} { errorExit } set name $a($ccId.name) - + parms results \ [mapeach address [parms restAddresses] { return [fabric::query $address [parms user] $name $function $args] @@ -369,7 +394,7 @@ proc ping {} { errorExit } set name $a($ccId.name) - + proc _ping {name} { set results {} foreach address [parms restAddresses] { @@ -395,7 +420,33 @@ proc ping {} { } } - + +proc pid {} { + + waitForNotOK pid + fixedArgs pid 0 + + parms results [mapeach peer [parms peers] { + return [parms network.peer.$peer.pid] + }] +} + + +proc ps {} { + + waitForNotOK ps + fixedArgs ps 1 + + parms results [mapeach peer [parms peers] { + set pid [parms network.peer.$peer.pid] + if {[catch {exec ps -p $pid -ww --noheader -o [parms args]} result]} { + errorExit "Exec of 'ps' failed : $result" + } + return [string trim $result]; # Remove leading/trailing whitespace + }] +} + + ############################################################################ # The script ############################################################################ @@ -410,11 +461,11 @@ setLoggingLevel {} warn set options { {enum {-h -help --help} parms(help) 0 p_help} {key -home parms(home) {}} - {bool -user parms(user) {} p_user} + {key -user parms(user) {} p_user} {key -waitFor parms(waitFor) {}} {bool -json parms(json) 0} } - + mapKeywordArgs $argv $options parms(other) if {$p_help} { @@ -430,7 +481,7 @@ parms command [first [parms other]] switch [parms command] { chaincodes {chaincodes} network {network} -} +} parms peers [busywork::peersFromSpec [second [parms other]]] parms args [restn [parms other] 2] @@ -459,6 +510,8 @@ switch [parms command] { invoke {invoke} query {query} ping {ping} + pid {pid} + ps {ps} default {errorExit "Unrecognized command: [parms command]"} } @@ -489,9 +542,3 @@ if {[parms json] || [expr {[llength [parms results]] > 1}]} { puts [first [parms results]] } - - - - - - diff --git a/tools/busywork/bin/pprofClient b/tools/busywork/bin/pprofClient index 888c06f6a23..0e8df692184 100755 --- a/tools/busywork/bin/pprofClient +++ b/tools/busywork/bin/pprofClient @@ -1,13 +1,13 @@ #!/usr/bin/tclsh # Copyright IBM Corp. 2016. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -66,7 +66,7 @@ of the profile files - they are named by the host:port of the profiling port, and given sequence numbers to make them unique. When this script is run against busywork networks, the final act of the script is to create symbolic links that substitute the host:port portion of the file names with the peer -names (vp[0,...N]). +names (vp[0,...N]). 
Optional arguments, with defaults after the colon: @@ -140,7 +140,7 @@ set options { {key -peers parms(peers) {} p_peers} {key -port parms(port) 6060} } - + mapKeywordArgs $argv $options parms(other) if {$p_help} { @@ -153,12 +153,12 @@ if {[parms quiet]} { } else { setLoggingLevel {} note } - + parms home [busywork::home [parms home]] # Handle implicit vs. explict peers. With implicit peers we can give the files # recognizable names. With explicit peers we can only use the host:port as the -# names. +# names. if {$p_peers} { @@ -175,34 +175,34 @@ if {$p_peers} { parms profileAddresses [parms ids] } else { - + # Implicit peers - + if {[catch {busywork::networkToArray ::parms network.} msg]} { errorExit $msg } - + switch [llength [parms other]] { - + 1 { # All peers - + parms service [parms other] parms ids [parms network.peer.ids] parms profileAddresses [parms network.peer.profileAddresses] } 2 { # Subset of peers - + set spec [first [parms other]] parms service [second [parms other]] - + if {[catch {enumerate $spec} result]} { errorExit \ "Error parsing peer specification : $result" } set nPeers [llength [parms network.peer.ids]] - + parms ids \ [mapeach n $result { if {$n > $nPeers} { @@ -214,7 +214,7 @@ if {$p_peers} { [mapeach n $result { return [lindex [parms network.peer.profileAddresses] $n] }] - + } default { puts $usage @@ -283,7 +283,8 @@ if {[waitPIDs $pids]} { # find the profile files that the tool just created and link the names given # by the tool with names based on the peer IDs. The Go pprof tool does not # support providing names for the profile files - it names them based on their -# http addresses using the form 'pprof..'. +# http addresses using the form 'pprof.[.].'. The form with the [.] was introduced in Go 1.7. # The renaming-via-links being done here is not perfect in every case, and can # cause confusion (but NOT lost files) if profiles from multiple runs are @@ -297,31 +298,41 @@ if {!$p_peers} { set idMap($target) $id } foreach file [glob pprof.*] { - if {[regexp {^pprof.([^:]+:\d+)(.*)(\d\d\d)(.*)} \ + set executable "" + if {[regexp {^pprof\.([^:]+:\d+)\.(.*)(\d\d\d)(.*)} \ $file match host a b c]} { - if {[info exists idMap($host)]} { - if {$p_tag} { - set b [parms tag] - } - set reName pprof.$host$a$b$c - set newName pprof.$idMap($host)$a$b$c - if {![file exists $newName]} { + + # We can't disambiguate the . form from a + # generic with a regular expression. So we need to + # check to see if one of the hosts we're interested in is a + # terminal suffix of the 'host' pulled form the regex match. 
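In other words, the Go pprof output may or may not carry an extra component between "pprof." and the host:port, so the script falls back to checking whether one of the known profiling addresses is a terminal suffix of whatever the regular expression captured as the host. A standalone Go sketch of that disambiguation follows; the file name and address are made-up examples.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Illustrative only: map a pprof output file back to one of the known peer
// profiling addresses by suffix match, since a regular expression alone
// cannot reliably isolate the host:port component of the file name.
var pprofFile = regexp.MustCompile(`^pprof\.([^:]+:\d+)\.(.*)(\d\d\d)(.*)`)

func peerAddressForFile(name string, profileAddresses []string) (string, bool) {
	m := pprofFile.FindStringSubmatch(name)
	if m == nil {
		return "", false
	}
	host := m[1] // may carry an unwanted leading component
	for _, target := range profileAddresses {
		if strings.HasSuffix(host, target) {
			return target, true
		}
	}
	return "", false
}

func main() {
	// Made-up file name and address purely for demonstration.
	fmt.Println(peerAddressForFile("pprof.localhost:6060.samples.cpu.001.pb.gz",
		[]string{"localhost:6060"}))
}
```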
+ + foreach target [parms profileAddresses] { + if {[string match *$target $host]} { if {$p_tag} { - if {[catch {exec mv $file $reName} why]} { - errorExit "Can't rename $file to $reName : $why" - } - if {[catch {exec ln -sf $reName $newName} code]} { - errorExit \ - "Error linking $file to $newName : $code" - } - note {} "$newName -> $reName (from $file)" - } else { - if {[catch {exec ln -sf $file $newName} code]} { - errorExit \ - "Error linking $file to $newName : $code" + set b [parms tag] + } + set reName pprof.$host.$a$b$c + set newName pprof.$idMap($target).$a$b$c + if {![file exists $newName]} { + if {$p_tag} { + if {[catch {exec mv $file $reName} why]} { + errorExit "Can't rename $file to $reName : $why" + } + if {[catch {exec ln -sf $reName $newName} code]} { + errorExit \ + "Error linking $file to $newName : $code" + } + note {} "$newName -> $reName (from $file)" + } else { + if {[catch {exec ln -sf $file $newName} code]} { + errorExit \ + "Error linking $file to $newName : $code" + } + note {} "$newName -> $file" } - note {} "$newName -> $file" - } + } + break } } } diff --git a/tools/busywork/bin/userModeNetwork b/tools/busywork/bin/userModeNetwork index bdeab1ce601..9bdfc8e0a03 100755 --- a/tools/busywork/bin/userModeNetwork +++ b/tools/busywork/bin/userModeNetwork @@ -65,6 +65,11 @@ Optional arguments, with defaults after the colon: the 'membersrvc' server. Peer login credentials are obtained from the fabric/membersrvc.yaml file. +-privacy | -noPrivacy : -noPrivacy + + Controls whether privacy is enabled. This option only has effect if + -security is enabled. + -noops | -batch : -batch Use one of these options to select the consensus mode. The default @@ -194,6 +199,7 @@ set options { {key -home parms(home) {}} {key -interface parms(interface) {} p_interface} {bool {-security -noSecurity} parms(security) 0} + {bool {-privacy -noPrivacy} parms(privacy) 0} {enum {-noops -batch} parms(consensus) -batch} {bool {-profile -noProfile} parms(profile) 1} {enum {-pristine -clean -dirty} parms(clean) -pristine} @@ -360,6 +366,7 @@ puts $config " \"host\": \"local\"," puts $config " \"date\": \"[timestamp]\"," puts $config " \"createdBy\": \"userModeNetwork\"," puts $config " \"security\": \"[? [parms security] true false]\"," +puts $config " \"privacy\": \"[? [parms privacy] true false]\"," puts $config " \"consensus\": \"$CONSENSUS_TO_MODE([parms consensus])\"," puts $config " \"peerProfileServer\": \"[? [parms profile] true false]\"," @@ -454,6 +461,7 @@ foreach clause $peerMap { set ::env(CORE_SECURITY_ENROLLID) $user set ::env(CORE_SECURITY_ENROLLSECRET) $password set ::env(CORE_SECURITY_ENABLED) true + set ::env(CORE_SECURITY_PRIVACY) [? [parms privacy] true false] } else { set ::env(CORE_SECURITY_ENABLED) false } diff --git a/tools/busywork/counters/driver b/tools/busywork/counters/driver index 23af5a0cb0f..7015cbc7991 100755 --- a/tools/busywork/counters/driver +++ b/tools/busywork/counters/driver @@ -513,6 +513,20 @@ parms txDelay [durationToMs [parms txDelay]] parms peerDelay [durationToMs [parms peerDelay]] parms netDelay [durationToMs [parms netDelay]] +# We can collect run-time statistics if 1) We are running locally on Linux, +# and 2) if the driver was started from a busywork 'network' file. 
+ +if {[parms remote] || + ![null [parms explicitPeers]] || + [catch {exec uname} os] || + ($os ne "Linux")} { + errorExit "'[parms remote]' '[null [parms explicitPeers]]' '$os'" + parms collectStats 0 +} else { + parms collectStats 1 +} + + ############################################################################ # Setup ############################################################################ @@ -904,6 +918,15 @@ proc clientRoutine {i_logger} { } +# Get peer stats before the client fork + +if {[parms collectStats]} { + foreach peer [parms network.peer.ids] pid [parms network.peer.pids] { + procPIDStat $pid ::parms stat.before.$peer. + } +} + + # Fork clients. The parent continues the script once all clients have exited; # clients run their driver routine and exit. @@ -946,14 +969,15 @@ if {$p_pprofClient} { # event of errors until the final agreement check. note {} "Waiting (indefinitely) for subprocesses to complete" -set t [time {set errors [waitPIDs $pids]} 1] +set startMs [clock milliseconds] +set errors [waitPIDs $pids] +set issueSec [expr ([clock milliseconds] - $startMs) / 1000.0] if {!$errors} { - set seconds [expr {[lindex $t 0] / 1e6}] - set rate [format %.2f [expr {[parms totalTransactions] / $seconds}]] + set rate [format %.2f [expr {[parms totalTransactions] / $issueSec}]] note {} \ - "Transaction rate : $rate per second " \ - "([parms totalTransactions] / $seconds)" + "Issue + interlock TX rate : $rate per second " \ + "([parms totalTransactions] / $issueSec)" } if {$p_pprofClient} { @@ -964,6 +988,33 @@ if {$p_pprofClient} { } } +# Print peer stats + +if {[parms collectStats]} { + if {[catch {exec getconf CLK_TCK} CLK_TCK]} { + error {} "Can't getconf CLK_TCK: $CLK_TCK" + } else { + set CLK_TCK $CLK_TCK.0; # Poor man's float() + note {} "Peer statistics (excluding initialization and deployment)" + note {} " Peer: CPU (sec) = User + System: Utilization: Threads" + note {} " -----------------------------------------------------" + foreach peer [parms network.peer.ids] pid [parms network.peer.pids] { + procPIDStat $pid ::parms stat.after.$peer. 
+ set user \ + [expr {([parms stat.after.$peer.utime] - \ + [parms stat.before.$peer.utime]) / $CLK_TCK}] + set sys \ + [expr {([parms stat.after.$peer.stime] - \ + [parms stat.before.$peer.stime]) / $CLK_TCK}] + set cpu [expr {$user + $sys}] + set util [expr {$cpu * 100.0 / $issueSec}] + set threads [parms stat.after.$peer.num_threads] + note {} [format "%8s %8.2f %8.2f %8.2f %8.0f%% %10d" \ + $peer $cpu $user $sys $util $threads] + } + } +} + if {$errors && ![parms force]} { errorExit "Aborting due to client errors" } @@ -987,6 +1038,13 @@ if {[parms interlock] && ![parms noops]} { set heights [removeDuplicates $originalHeights] if {[llength $heights] != 1} { note {} " Observed block heights: $originalHeights" + } else { + note {} " Consensus block height: $heights" + set finalSec [expr ([clock milliseconds] - $::startMs) / 1000.0] + set rate [format %.2f [expr {[parms totalTransactions] / $finalSec}]] + note {} \ + "Fully committed TX rate : $rate per second " \ + "([parms totalTransactions] / $finalSec)" } return [expr {[llength $heights] == 1}] } diff --git a/tools/busywork/tcl/fabric.tcl b/tools/busywork/tcl/fabric.tcl index afe11a8dd6d..3a9b1e168fa 100644 --- a/tools/busywork/tcl/fabric.tcl +++ b/tools/busywork/tcl/fabric.tcl @@ -25,7 +25,7 @@ package provide fabric 0.0 namespace eval ::fabric {} ############################################################################ -# devops i_peer i_method i_query {i_retry 0} +# chaincode i_peer i_method i_query {i_retry 0} # Make a REST API 'devops' query. The i_peer is the full host:port # address. The i_method must be 'deploy', 'invoke' or 'query'. @@ -40,12 +40,12 @@ namespace eval ::fabric {} # exits. If the HTTP access fails then the call will exit with Tcl error{} and # the caller will presumably catch{} the error and do whatever is appropriate. 
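The rewritten fabric.tcl helpers below no longer hit /devops/<method>; they POST a JSON-RPC 2.0 envelope to the peer's /chaincode endpoint and pull result.status and result.message out of the response. A hedged Go sketch of that request shape follows; the JSON field names come from the templates in this diff, while the Go type and helper names are invented for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Sketch of the JSON-RPC 2.0 body POSTed to http://<peer>/chaincode.
type ctorMsg struct {
	Args []string `json:"args"`
}

type chaincodeParams struct {
	Type          int               `json:"type"` // 1 selects GOLANG chaincode
	ChaincodeID   map[string]string `json:"chaincodeID"`
	CtorMsg       ctorMsg           `json:"ctorMsg"`
	SecureContext string            `json:"secureContext,omitempty"`
}

type rpcRequest struct {
	JSONRPC string          `json:"jsonrpc"`
	Method  string          `json:"method"` // "deploy", "invoke" or "query"
	Params  chaincodeParams `json:"params"`
	ID      int             `json:"id"`
}

func main() {
	req := rpcRequest{
		JSONRPC: "2.0",
		Method:  "invoke",
		Params: chaincodeParams{
			Type:          1,
			ChaincodeID:   map[string]string{"name": "myChaincodeHash"}, // placeholder
			CtorMsg:       ctorMsg{Args: []string{"invoke", "counter1", "1"}},
			SecureContext: "test_user0",
		},
		ID: 1,
	}
	body, err := json.MarshalIndent(req, "", "  ")
	if err != nil {
		panic(err)
	}
	// This body would be POSTed to http://<peer>/chaincode; the response is
	// expected to carry result.status ("OK") and result.message.
	fmt.Println(string(body))
}
```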
-proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { +proc ::fabric::chaincode {i_peer i_method i_query {i_retry 0}} { for {set retry [math:::max $i_retry 0]} {$retry >= 0} {incr retry -1} { if {[catch { - ::http::geturl http://$i_peer/devops/$i_method -query $i_query + ::http::geturl http://$i_peer/chaincode -query $i_query } token]} { if {$i_retry < 0} { http::cleanup $token @@ -54,7 +54,7 @@ proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { if {$retry > 0} { if {$retry == $i_retry} { warn fabric \ - "fabric::devops/$i_method $i_peer : " \ + "fabric::chaincode $i_peer $i_method: " \ "Retrying after catastrophic HTTP error" } http::cleanup $token @@ -62,13 +62,13 @@ proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { } if {($retry == 0) && ($i_retry != 0)} { err fabric \ - "fabric::devops/$i_method $i_peer : " \ + "fabric::chaincode $i_peer $i_method: " \ "Retry limit ($i_retry) hit after " \ "catastrophic HTTP error : Aborting" } http::cleanup $token errorExit \ - "fabric::devops/$i_method $i_peer : ::http::geturl failed\n" \ + "fabric::chaincode $i_peer $i_method: ::http::geturl failed\n" \ $::errorInfo } @@ -80,12 +80,12 @@ proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { if {$retry > 0} { if {$retry == $i_retry} { warn fabric \ - "fabric::devops/$i_method $i_peer : " \ + "fabric::chaincode $i_peer $i_method: " \ "Retrying after HTTP error return" } if {($retry == 0) && ($i_retry != 0)} { err fabric \ - "fabric::devops/$i_method $i_peer : " \ + "fabric::chaincode $i_peer $i_method: " \ "Retry limit ($i_retry) hit after " \ "HTTP error return : Aborting" } @@ -93,48 +93,52 @@ proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { continue } err fabric \ - "FABRIC '$i_method' transaction to $i_peer failed " \ + "fabric::chaincode '$i_method' transaction to $i_peer failed " \ "with ncode = '[http::ncode $token]'; Aborting\n" httpErrorExit $token } set response [http::data $token] - set err [catch { + set fail [catch { set parse [json::json2dict $response] - set ok [dict get $parse OK] - switch $i_method { - deploy - - invoke { - set result [dict get $parse message] + set result [dict get $parse result] + set status [dict get $result status] + if {$status ne "OK"} { + error "Status not OK" } - query { - set result $ok - } - default { - error "Unrecognized method $i_method" - } - } - }] + set message [dict get $result message] + } err] - if {$err} { - err fabric \ - "FABRIC '$i_method' response from $i_peer " \ - "is malformed/unexpected" - httpErrorExit $token + if {$fail} { + + set msg \ + [concat \ + "fabric::chaincode '$i_method' response from $i_peer " \ + "is malformed/unexpected: $err"] + + if {$i_retry < 0} { + http::cleanup $token + error $msg + + } else { + + err fabric $msg + httpErrorExit $token + } } http::cleanup $token if {($i_retry >= 0) && ($retry != $i_retry)} { note fabric \ - "fabric::devops/$i_method $i_peer : " \ + "fabric::chaincode $i_peer $i_method: " \ "Success after [expr {$i_retry - $retry}] HTTP retries" } break } - return $result + return $message } @@ -144,17 +148,17 @@ proc ::fabric::devops {i_peer i_method i_query {i_retry 0}} { # Deploy a GOLANG chaincode to the network. The i_peer is the full network # address (host:port) of the REST API port of the peer. If i_user is # non-empty then this will be a secure transaction. The constructor will apply -# i_fn to i_args. Note that i_args is a normal Tcl list. 
This routine will -# convert i_args into a JSON array, wrapping each element of i_args in double -# quotes. i_fn will also be properly quoted. +# i_fn to i_args. -# See ::fabric::devops{} for a discussion of the 'i_retry' parameter. +# See ::fabric::chaincode{} for a discussion of the 'i_retry' parameter. proc ::fabric::deploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} { set template { - { - "type" : "GOLANG", + "jsonrpc" : "2.0", + "method" : "deploy", + "params" : { + "type": 1, "chaincodeID" : { "path" : "$i_chaincode" }, @@ -162,16 +166,17 @@ proc ::fabric::deploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} { "args" : [$args] }, "secureContext": "$i_user" - } + }, + "id": 1 } set args [argify $i_fn $i_args] - set query [subst -nocommand $template] - - return [devops $i_peer deploy $query $i_retry] + set query [list [subst -nocommand $template]] + return [chaincode $i_peer deploy $query $i_retry] } + ############################################################################ # devModeDeploy i_peer i_user i_chaincode i_fn i_args {i_retry 0} @@ -179,13 +184,15 @@ proc ::fabric::deploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} { # mode. Here, the i_chaincode is a user-specified name. All of the other # arguments are otherwise the same as for deploy{}. -# See ::fabric::devops{} for a discussion of the 'i_retry' parameter. +# See ::fabric::chaincode{} for a discussion of the 'i_retry' parameter. proc ::fabric::devModeDeploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} { set template { - { - "type" : "GOLANG", + "jsonrpc" : "2.0", + "method" : "deploy", + "params" : { + "type": 1, "chaincodeID" : { "name" : "$i_chaincode" }, @@ -193,14 +200,14 @@ proc ::fabric::devModeDeploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} "args" : [$args] }, "secureContext": "$i_user" - } + }, + "id": 1 } set args [argify $i_fn $i_args] - set query [subst -nocommand $template] + set query [list [subst -nocommand $template]] return [devops $i_peer deploy $query $i_retry] - } ############################################################################ @@ -209,33 +216,32 @@ proc ::fabric::devModeDeploy {i_peer i_user i_chaincode i_fn i_args {i_retry 0}} # Invoke a GOLANG chaincode on the network. The i_peer is the full network # address (host:port) of the REST API port of the peer. If i_user is non-empty # then this will be a secure transaction. The i_chaincodeName is the hash used -# to identify the chaincode. The invocation will apply i_fn to i_args. Note -# that i_args is a normal Tcl list. This routine will convert i_args into a -# JSON array, wrapping each element of i_args in double quotes. i_fn will also -# be properly quoted. +# to identify the chaincode. The invocation will apply i_fn to i_args. -# See ::fabric::devops{} for a discussion of the 'i_retry' parameter. +# See ::fabric::chaincode{} for a discussion of the 'i_retry' parameter. 
proc ::fabric::invoke {i_peer i_user i_chaincodeName i_fn i_args {i_retry 0}} { set template { - { - "chaincodeSpec" : {"type" : "GOLANG", - "chaincodeID" : { - "name" : "$i_chaincodeName" - }, - "ctorMsg" : { - "args" : [$args] - }, - "secureContext": "$i_user" - } - } + "jsonrpc" : "2.0", + "method" : "invoke", + "params" : { + "type": 1, + "chaincodeID" : { + "name" : "$i_chaincodeName" + }, + "ctorMsg" : { + "args" : [$args] + }, + "secureContext": "$i_user" + }, + "id": 1 } set args [argify $i_fn $i_args] - set query [subst -nocommand $template] + set query [list [subst -nocommand $template]] - return [devops $i_peer invoke $query $i_retry] + return [chaincode $i_peer invoke $query $i_retry] } @@ -245,34 +251,32 @@ proc ::fabric::invoke {i_peer i_user i_chaincodeName i_fn i_args {i_retry 0}} { # Query a GOLANG chaincode on the network. The i_peer is the full network # address (host:port) of the REST API port of the peer. If i_user is non-empty # then this will be a secure transaction. The i_chaincodeName is the hash used -# to identify the chaincode. The query will apply i_fn to i_args. Note that -# i_args is a normal Tcl list. This routine will convert i_args into a JSON -# array, wrapping each element of i_args in double quotes. i_fn will also be -# properly quoted. The query result (currently assumed to be a string) is -# returned. +# to identify the chaincode. The query will apply i_fn to i_args. -# See ::fabric::devops{} for a discussion of the 'i_retry' parameter. +# See ::fabric::chaincode{} for a discussion of the 'i_retry' parameter. proc ::fabric::query {i_peer i_user i_chaincodeName i_fn i_args {i_retry 0}} { set template { - { - "chaincodeSpec" : {"type" : "GOLANG", - "chaincodeID" : { - "name" : "$i_chaincodeName" - }, - "ctorMsg" : { - "args" : [$args] - }, - "secureContext": "$i_user" - } - } + "jsonrpc" : "2.0", + "method" : "query", + "params" : { + "type": 1, + "chaincodeID" : { + "name" : "$i_chaincodeName" + }, + "ctorMsg" : { + "args" : [$args] + }, + "secureContext": "$i_user" + }, + "id": 1 } set args [argify $i_fn $i_args] - set query [subst -nocommand $template] + set query [list [subst -nocommand $template]] - return [devops $i_peer query $query $i_retry] + return [chaincode $i_peer query $query $i_retry] } @@ -298,11 +302,11 @@ proc ::fabric::height {i_peer {i_retry 0}} { "$i_peer /chain: ::http::geturl failed " \ "with $i_retry retries : $token" } - + if {[http::ncode $token] != 200} { - + # Failure - + if {$retry > 0} { if {$retry == $i_retry} { warn fabric \ @@ -311,12 +315,12 @@ proc ::fabric::height {i_peer {i_retry 0}} { http::cleanup $token continue } - + err fabric \ "$i_peer /chain; REST API call failed with $i_retry retries" httpErrorExit $token } - + if {[catch {json::json2dict [http::data $token]} parse]} { err fabric "$i_peer /chain: JSON response does not parse: $parse" httpErrorExit $token @@ -430,7 +434,7 @@ proc ::fabric::caLogin {i_peer i_user i_secret} { ############################################################################ # argify i_fn i_args -# Convert old-style fn + args pair into a list of quoted base64 arguments with +# Convert old-style fn + args pair into a list of quoted arguments with # commas to satisfy the most recent JSON format of the REST API. This needs to # be done as a string (rather than as a list), otherwise it will be {} quoted # when substituted. 
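The argify helper, revised next, simply turns the function name plus its arguments into a comma-separated run of double-quoted strings ready to be substituted into the "args" JSON array; the base64 encoding required by the previous REST format is gone. An equivalent Go sketch is shown below (a hypothetical helper which, like the Tcl version, does not escape quotes embedded in the arguments).

```go
package main

import (
	"fmt"
	"strings"
)

// argify joins the function name and its arguments into the comma-separated,
// double-quoted form that gets substituted into the "args" JSON array.
func argify(fn string, args []string) string {
	all := append([]string{fn}, args...)
	quoted := make([]string, len(all))
	for i, a := range all {
		quoted[i] = `"` + a + `"`
	}
	return strings.Join(quoted, ",")
}

func main() {
	// Prints: "invoke","counter1","1"
	fmt.Println(argify("invoke", []string{"counter1", "1"}))
}
```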
@@ -438,11 +442,11 @@ proc ::fabric::caLogin {i_peer i_user i_secret} { proc ::fabric::argify {i_fn i_args} { set args [concat $i_fn $i_args] - set args64 "" + set jsonargs "" set comma "" foreach arg $args { - set args64 "$args64$comma\"[binary encode base64 $arg]\"" + set jsonargs "$jsonargs$comma\"$arg\"" set comma , } - return $args64 + return $jsonargs } diff --git a/tools/busywork/tcl/os.tcl b/tools/busywork/tcl/os.tcl index 32891b64ed9..b2c8af544b1 100644 --- a/tools/busywork/tcl/os.tcl +++ b/tools/busywork/tcl/os.tcl @@ -1,13 +1,13 @@ # os.tcl - Utilities related to the operating system functions # Copyright IBM Corp. 2016. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -43,7 +43,7 @@ proc waitPIDs {pids {digest {}}} { set _digest($pid.why) $why set _digest($pid.rc) $rc set _digest($pid.ok) [expr {($why eq "EXIT") && ($rc == 0)}] - + switch $why { EXIT { if {$rc != 0} { @@ -67,3 +67,97 @@ proc waitPIDs {pids {digest {}}} { } return $rv } + + +############################################################################ +# procPIDStat i_pid o_array {i_prefix ""} + +# This procedure parses the result of executing + +# cat /proc/[i_pid]/stat + +# and stores the results in the array o_array keyed by name. See man proc(5) +# under /proc/[pid]/stat for an interpretation of the fields. The names are +# taken directly from the man page. The optional i_prefix can be specified to +# allow stats for multiple PIDs to be stored in the same array, using an +# indexing scheme chosen by the caller. + +# NB: This procedure only works on Linux, and will fail on other operating +# systems. + +# BUGS: Field 2 (comm) is the command name in parenthesis. If the file name of +# the command includes white space this will throw off the current parser. + +# Note: Copy/edited taken directly from the man page, which uses 1-based +# addressing. 
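For reference, the same sampling can be written directly against proc(5): read /proc/<pid>/stat, pick the utime, stime and num_threads fields by position, and convert tick deltas to seconds with CLK_TCK, which is how the counters driver reports per-peer CPU utilization. A Go sketch under those assumptions follows (Linux only, and CLK_TCK is hard-coded to the common value of 100 rather than queried with getconf).

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
)

// pidStat holds the few /proc/[pid]/stat fields the busywork driver uses.
// Field numbers follow proc(5), which counts from 1.
type pidStat struct {
	Utime      int64 // field 14: user-mode clock ticks
	Stime      int64 // field 15: kernel-mode clock ticks
	NumThreads int64 // field 20: number of threads
}

func readPIDStat(pid int) (pidStat, error) {
	raw, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
	if err != nil {
		return pidStat{}, err
	}
	// Like the Tcl parser, splitting on whitespace is thrown off if the
	// command name (field 2) itself contains spaces.
	f := strings.Fields(string(raw))
	if len(f) < 20 {
		return pidStat{}, fmt.Errorf("unexpected /proc/%d/stat format", pid)
	}
	get := func(n int) int64 { // n is the 1-based field number from proc(5)
		v, _ := strconv.ParseInt(f[n-1], 10, 64)
		return v
	}
	return pidStat{Utime: get(14), Stime: get(15), NumThreads: get(20)}, nil
}

func main() {
	const clkTck = 100.0 // usually obtained with `getconf CLK_TCK`; assumed here
	before, _ := readPIDStat(os.Getpid())
	// ... run the workload being measured ...
	after, _ := readPIDStat(os.Getpid())
	cpuSec := float64((after.Utime-before.Utime)+(after.Stime-before.Stime)) / clkTck
	fmt.Printf("CPU seconds: %.2f, threads: %d\n", cpuSec, after.NumThreads)
}
```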
+array unset ::procPIDStatMap +foreach {index key} { + 1 pid + 2 comm + 3 state + 4 ppid + 5 pgrp + 6 session + 7 tty_nr + 8 tpgid + 9 flags + 10 minflt + 11 cminflt + 12 majflt + 13 cmajflt + 14 utime + 15 stime + 16 cutime + 17 cstime + 18 priority + 19 nice + 20 num_threads + 21 itrealvalue + 22 starttime + 23 vsize + 24 rss + 25 rsslim + 26 startcode + 27 endcode + 28 startstack + 29 kstkesp + 30 kstkeip + 31 signal + 32 blocked + 33 sigignore + 34 sigcatch + 35 wchan + 36 nswap + 37 cnswap + 38 exit_signal + 39 processor + 40 rt_priority + 41 policy + 42 delayacct_blkio_ticks + 43 guest_time + 44 cguest_time + 45 start_data + 46 end_data + 47 start_brk + 48 arg_start + 49 arg_end + 50 env_start + 51 env_end + 52 exit_code +} { + set ::procPIDStatMap([expr {$index - 1}]) $key +} + + +proc procPIDStat {i_pid o_array {i_prefix ""}} { + + upvar $o_array a + + if {[catch {exec cat /proc/$i_pid/stat} stat]} { + errorExit "Can not cat /proc/$i_pid/stat: $stat" + } + + foreach index [array names ::procPIDStatMap] { + set a($i_prefix$::procPIDStatMap($index)) [lindex $stat $index] + } +} diff --git a/vendor/github.com/golang/protobuf/Make.protobuf b/vendor/github.com/golang/protobuf/Make.protobuf new file mode 100644 index 00000000000..15071de1011 --- /dev/null +++ b/vendor/github.com/golang/protobuf/Make.protobuf @@ -0,0 +1,40 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Includable Makefile to add a rule for generating .pb.go files from .proto files +# (Google protocol buffer descriptions). +# Typical use if myproto.proto is a file in package mypackage in this directory: +# +# include $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf + +%.pb.go: %.proto + protoc --go_out=. 
$< + diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile new file mode 100644 index 00000000000..80b6a17d024 --- /dev/null +++ b/vendor/github.com/golang/protobuf/Makefile @@ -0,0 +1,54 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +all: install + +install: + go install ./proto ./jsonpb ./ptypes + go install ./protoc-gen-go + +test: + go test ./proto ./jsonpb ./ptypes + make -C protoc-gen-go/testdata test + +clean: + go clean ./... + +nuke: + go clean -i ./... + +regenerate: + make -C protoc-gen-go/descriptor regenerate + make -C protoc-gen-go/plugin regenerate + make -C protoc-gen-go/testdata regenerate + make -C proto/testdata regenerate + make -C jsonpb/jsonpb_test_proto regenerate diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md index fa4883f6266..037fc7c8e2d 100644 --- a/vendor/github.com/golang/protobuf/README.md +++ b/vendor/github.com/golang/protobuf/README.md @@ -101,6 +101,12 @@ for a protocol buffer variable v: with distinguished wrapper types for each possible field value. - Marshal and Unmarshal are functions to encode and decode the wire format. +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + Consider file test.proto, containing ```proto @@ -134,6 +140,7 @@ To create and play with a Test object from the example package, test := &example.Test { Label: proto.String("hello"), Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, Optionalgroup: &example.Test_OptionalGroup { RequiredField: proto.String("good bye"), }, @@ -184,3 +191,51 @@ the `plugins` parameter to protoc-gen-go; the usual way is to insert it into the --go_out argument to protoc: protoc --go_out=plugins=grpc:. 
*.proto + +## Compatibility ## + +The library and the generated code are expected to be stable over time. +However, we reserve the right to make breaking changes without notice for the +following reasons: + +- Security. A security issue in the specification or implementation may come to + light whose resolution requires breaking compatibility. We reserve the right + to address such security issues. +- Unspecified behavior. There are some aspects of the Protocol Buffers + specification that are undefined. Programs that depend on such unspecified + behavior may break in future releases. +- Specification errors or changes. If it becomes necessary to address an + inconsistency, incompleteness, or change in the Protocol Buffers + specification, resolving the issue could affect the meaning or legality of + existing programs. We reserve the right to address such issues, including + updating the implementations. +- Bugs. If the library has a bug that violates the specification, a program + that depends on the buggy behavior may break if the bug is fixed. We reserve + the right to fix such bugs. +- Adding methods or fields to generated structs. These may conflict with field + names that already exist in a schema, causing applications to break. When the + code generator encounters a field in the schema that would collide with a + generated field or method name, the code generator will append an underscore + to the generated field or method name. +- Adding, removing, or changing methods or fields in generated structs that + start with `XXX`. These parts of the generated code are exported out of + necessity, but should not be considered part of the public API. +- Adding, removing, or changing unexported symbols in generated code. + +Any breaking changes outside of these will be announced 6 months in advance to +protobuf@googlegroups.com. + +You should, whenever possible, use generated code created by the `protoc-gen-go` +tool built at the same commit as the `proto` package. The `proto` package +declares package-level constants in the form `ProtoPackageIsVersionX`. +Application code and generated code may depend on one of these constants to +ensure that compilation will fail if the available version of the proto library +is too old. Whenever we make a change to the generated code that requires newer +library support, in the same commit we will increment the version number of the +generated code and declare a new package-level constant whose name incorporates +the latest version number. Removing a compatibility constant is considered a +breaking change and would be subject to the announcement policy stated above. + +The `protoc-gen-go/generator` package exposes a plugin interface, +which is used by the gRPC code generation. This interface is not +supported and is subject to incompatible changes without notice. diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go index c9afada0853..38f86c5f7e7 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -30,49 +30,52 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* -Package jsonpb provides marshaling/unmarshaling functionality between -protocol buffer and JSON objects. +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. 
-Compared to encoding/json, this library: - - encodes int64, uint64 as strings - - optionally encodes enums as integers +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. */ package jsonpb import ( "bytes" "encoding/json" + "errors" "fmt" "io" "reflect" "sort" "strconv" "strings" + "time" "github.com/golang/protobuf/proto" ) -var ( - byteArrayType = reflect.TypeOf([]byte{}) -) - // Marshaler is a configurable object for converting between -// protocol buffer objects and a JSON representation for them +// protocol buffer objects and a JSON representation for them. type Marshaler struct { // Whether to render enum values as integers, as opposed to string values. EnumsAsInts bool + // Whether to render fields with zero values. + EmitDefaults bool + // A string to indent each level by. The presence of this field will // also cause a space to appear between the field separator and // value, and for newlines to be appear between fields and array // elements. Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool } // Marshal marshals a protocol buffer into JSON. func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { writer := &errWriter{writer: out} - return m.marshalObject(writer, pb, "") + return m.marshalObject(writer, pb, "", "") } // MarshalToString converts a protocol buffer object to JSON string. @@ -84,24 +87,96 @@ func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { return buf.String(), nil } +type int32Slice []int32 + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type wkt interface { + XXX_WellKnownType() string +} + // marshalObject writes a struct to the Writer. -func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent string) error { +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + // "Generated output always contains 3, 6, or 9 fractional digits, + // depending on required precision." + s, ns := s.Field(0).Int(), s.Field(1).Int() + d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond + x := fmt.Sprintf("%.9f", d.Seconds()) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct": + // Let marshalValue handle the `fields` map. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 3, 6 or 9 fractional digits." 
+ s, ns := s.Field(0).Int(), s.Field(1).Int() + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + out.write("{") if m.Indent != "" { out.write("\n") } - s := reflect.ValueOf(v).Elem() - writeBeforeField := "" + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + for i := 0; i < s.NumField(); i++ { value := s.Field(i) valueField := s.Type().Field(i) if strings.HasPrefix(valueField.Name, "XXX_") { continue } - fieldName := jsonFieldName(valueField) - - // TODO: proto3 objects should have default values omitted. // IsNil will panic on most value kinds. switch value.Kind() { @@ -111,58 +186,197 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent string } } + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + } + } + // Oneof fields need special handling. if valueField.Tag.Get("protobuf_oneof") != "" { // value is an interface containing &T{real_value}. sv := value.Elem().Elem() // interface -> *T -> T value = sv.Field(0) valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } - var p proto.Properties - p.Parse(sv.Type().Field(0).Tag.Get("protobuf")) - fieldName = p.OrigName + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. 
+ ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false } - out.write(writeBeforeField) + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + // Only the part of type_url after the last slash is relevant. + mname := turl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return fmt.Errorf("unknown message type %q", mname) + } + msg := reflect.New(mt.Elem()).Interface().(proto.Message) + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(wkt); ok { + out.write("{") if m.Indent != "" { - out.write(indent) - out.write(m.Indent) + out.write("\n") } - out.write(`"`) - out.write(fieldName) - out.write(`":`) + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) if m.Indent != "" { - out.write(" ") + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) } - - if err := m.marshalValue(out, value, valueField, indent); err != nil { + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { return err } - if m.Indent != "" { - writeBeforeField = ",\n" - } else { - writeBeforeField = "," + out.write("\n") + out.write(indent) } + out.write("}") + return out.err } + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { if m.Indent != "" { - out.write("\n") out.write(indent) + out.write(m.Indent) } - out.write("}") + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) return out.err } +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + // marshalValue writes the value to the Writer. 
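Taken together with the Marshaler options added above (EmitDefaults, OrigName, Indent, EnumsAsInts) and the new Unmarshaler, typical use of this package looks roughly like the sketch below; the imported package `pb` is a placeholder for any proto-generated Go package, not a real path.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"

	pb "example.com/myapp/mypb" // placeholder for any generated proto package
)

func main() {
	msg := &pb.MyMessage{} // any generated proto.Message value

	// Marshal to JSON: keep zero-valued fields, use the original .proto
	// field names, and pretty-print with two-space indentation.
	m := jsonpb.Marshaler{EmitDefaults: true, OrigName: true, Indent: "  "}
	s, err := m.MarshalToString(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(s)

	// Unmarshal back, tolerating JSON fields this binary does not know about.
	u := jsonpb.Unmarshaler{AllowUnknownFields: true}
	var out pb.MyMessage
	if err := u.Unmarshal(strings.NewReader(s), &out); err != nil {
		panic(err)
	}
}
```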
-func (m *Marshaler) marshalValue(out *errWriter, v reflect.Value, - structField reflect.StructField, indent string) error { +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { var err error v = reflect.Indirect(v) // Handle repeated elements. - if v.Type() != byteArrayType && v.Kind() == reflect.Slice { + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { out.write("[") comma := "" for i := 0; i < v.Len(); i++ { @@ -174,7 +388,9 @@ func (m *Marshaler) marshalValue(out *errWriter, v reflect.Value, out.write(m.Indent) out.write(m.Indent) } - m.marshalValue(out, sliceVal, structField, indent+m.Indent) + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } comma = "," } if m.Indent != "" { @@ -186,9 +402,21 @@ func (m *Marshaler) marshalValue(out *errWriter, v reflect.Value, return out.err } + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + type wkt interface { + XXX_WellKnownType() string + } + if wkt, ok := v.Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + // Handle enumerations. - protoInfo := structField.Tag.Get("protobuf") - if !m.EnumsAsInts && strings.Contains(protoInfo, ",enum=") { + if !m.EnumsAsInts && prop.Enum != "" { // Unknown enum values will are stringified by the proto library as their // value. Such values should _not_ be quoted or they will be interpreted // as an enum string instead of their value. @@ -212,7 +440,7 @@ func (m *Marshaler) marshalValue(out *errWriter, v reflect.Value, // Handle nested messages. if v.Kind() == reflect.Struct { - return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent) + return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") } // Handle maps. @@ -253,7 +481,7 @@ func (m *Marshaler) marshalValue(out *errWriter, v reflect.Value, out.write(` `) } - if err := m.marshalValue(out, v.MapIndex(k), structField, indent+m.Indent); err != nil { + if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil { return err } } @@ -282,32 +510,132 @@ func (m *Marshaler) marshalValue(out *errWriter, v reflect.Value, return out.err } +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. 
+func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + // Unmarshal unmarshals a JSON object stream into a protocol // buffer. This function is lenient and will decode any options // permutations of the related Marshaler. func Unmarshal(r io.Reader, pb proto.Message) error { - inputValue := json.RawMessage{} - if err := json.NewDecoder(r).Decode(&inputValue); err != nil { - return err - } - return unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue) + return new(Unmarshaler).Unmarshal(r, pb) } // UnmarshalString will populate the fields of a protocol buffer based // on a JSON string. This function is lenient and will decode any options // permutations of the related Marshaler. func UnmarshalString(str string, pb proto.Message) error { - return Unmarshal(strings.NewReader(str), pb) + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) } // unmarshalValue converts/copies a value into the target. -func unmarshalValue(target reflect.Value, inputValue json.RawMessage) error { +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { targetType := target.Type() // Allocate memory for pointer fields. if targetType.Kind() == reflect.Ptr { target.Set(reflect.New(targetType.Elem())) - return unmarshalValue(target.Elem(), inputValue) + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + // Handle well-known types. + type wkt interface { + XXX_WellKnownType() string + } + if wkt, ok := target.Addr().Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, except that null is allowed." + // encoding/json will turn JSON `null` into Go `nil`, + // so we don't have to do any extra work. + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + return fmt.Errorf("unmarshaling Any not supported yet") + case "Duration": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { + return err + } + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + ns := t.UnixNano() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + } + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + target.SetInt(int64(n)) + return nil } // Handle nested messages. 
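For callers of the API introduced above, the streaming entry point pairs with encoding/json's Decoder: each UnmarshalNext call consumes one JSON object from the stream, and AllowUnknownFields suppresses the unknown-field error that Unmarshal otherwise reports. A minimal sketch, assuming the Simple test message added later in this patch and the vendored github.com/golang/protobuf import paths; the driver scaffolding and input strings are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
)

func main() {
	// Two concatenated JSON objects; the second carries a field Simple does not define.
	in := `{"oBool":true}{"oInt32":-32,"unknown":"ignored"}`
	dec := json.NewDecoder(strings.NewReader(in))
	u := jsonpb.Unmarshaler{AllowUnknownFields: true}
	for {
		msg := &pb.Simple{}
		err := u.UnmarshalNext(dec, msg)
		if err == io.EOF {
			break // stream exhausted
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(msg)
	}
}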
@@ -317,59 +645,59 @@ func unmarshalValue(target reflect.Value, inputValue json.RawMessage) error { return err } + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + sprops := proto.GetProperties(targetType) for i := 0; i < target.NumField(); i++ { ft := target.Type().Field(i) if strings.HasPrefix(ft.Name, "XXX_") { continue } - fieldName := jsonFieldName(ft) - valueForField, ok := jsonFields[fieldName] + valueForField, ok := consumeField(sprops.Prop[i]) if !ok { continue } - delete(jsonFields, fieldName) - - // Handle enums, which have an underlying type of int32, - // and may appear as strings. We do this while handling - // the struct so we have access to the enum info. - // The case of an enum appearing as a number is handled - // by the recursive call to unmarshalValue. - if enum := sprops.Prop[i].Enum; valueForField[0] == '"' && enum != "" { - vmap := proto.EnumValueMap(enum) - // Don't need to do unquoting; valid enum names - // are from a limited character set. - s := valueForField[1 : len(valueForField)-1] - n, ok := vmap[string(s)] - if !ok { - return fmt.Errorf("unknown value %q for enum %s", s, enum) - } - f := target.Field(i) - if f.Kind() == reflect.Ptr { // proto2 - f.Set(reflect.New(f.Type().Elem())) - f = f.Elem() - } - f.SetInt(int64(n)) - continue - } - if err := unmarshalValue(target.Field(i), valueForField); err != nil { + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { return err } } // Check for any oneof fields. - for fname, raw := range jsonFields { - if oop, ok := sprops.OneofTypes[fname]; ok { + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } nv := reflect.New(oop.Type.Elem()) target.Field(oop.Field).Set(nv) - if err := unmarshalValue(nv.Elem().Field(0), raw); err != nil { + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { return err } - delete(jsonFields, fname) } } - if len(jsonFields) > 0 { + if !u.AllowUnknownFields && len(jsonFields) > 0 { // Pick any field to be the scapegoat. 
var f string for fname := range jsonFields { @@ -382,7 +710,7 @@ func unmarshalValue(target reflect.Value, inputValue json.RawMessage) error { } // Handle arrays (which aren't encoded bytes) - if targetType != byteArrayType && targetType.Kind() == reflect.Slice { + if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { var slc []json.RawMessage if err := json.Unmarshal(inputValue, &slc); err != nil { return err @@ -390,7 +718,7 @@ func unmarshalValue(target reflect.Value, inputValue json.RawMessage) error { len := len(slc) target.Set(reflect.MakeSlice(targetType, len, len)) for i := 0; i < len; i++ { - if err := unmarshalValue(target.Index(i), slc[i]); err != nil { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { return err } } @@ -404,6 +732,13 @@ func unmarshalValue(target reflect.Value, inputValue json.RawMessage) error { return err } target.Set(reflect.MakeMap(targetType)) + var keyprop, valprop *proto.Properties + if prop != nil { + // These could still be nil if the protobuf metadata is broken somehow. + // TODO: This won't work because the fields are unexported. + // We should probably just reparse them. + //keyprop, valprop = prop.mkeyprop, prop.mvalprop + } for ks, raw := range mp { // Unmarshal map key. The core json library already decoded the key into a // string, so we handle that specially. Other types were quoted post-serialization. @@ -412,14 +747,14 @@ func unmarshalValue(target reflect.Value, inputValue json.RawMessage) error { k = reflect.ValueOf(ks) } else { k = reflect.New(targetType.Key()).Elem() - if err := unmarshalValue(k, json.RawMessage(ks)); err != nil { + if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { return err } } // Unmarshal map value. v := reflect.New(targetType.Elem()).Elem() - if err := unmarshalValue(v, raw); err != nil { + if err := u.unmarshalValue(v, raw, valprop); err != nil { return err } target.SetMapIndex(k, v) @@ -438,11 +773,26 @@ func unmarshalValue(target reflect.Value, inputValue json.RawMessage) error { return json.Unmarshal(inputValue, target.Addr().Interface()) } -// jsonFieldName returns the field name to use. -func jsonFieldName(f reflect.StructField) string { +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { var prop proto.Properties prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) - return prop.OrigName + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts } // Writer wrapper inspired by https://blog.golang.org/errors-are-values @@ -462,10 +812,21 @@ func (w *errWriter) write(str string) { // The easiest way to sort them in some deterministic order is to use fmt. // If this turns out to be inefficient we can always consider other options, // such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. 
type mapKeys []reflect.Value func (s mapKeys) Len() int { return len(s) } func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) } diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go new file mode 100644 index 00000000000..d2f6604a428 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go @@ -0,0 +1,559 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
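As a reading aid for the table-driven tests that follow, the Marshaler options exercised by this change compose independently; the expected strings in the comments mirror cases from jsonpb_test.go. The vendored import paths and the main scaffolding are assumptions about how a caller would wire this up, not part of the patch itself.

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
)

func main() {
	// Proto3 scalar zero values are omitted unless EmitDefaults is set.
	plain := jsonpb.Marshaler{}
	withDefaults := jsonpb.Marshaler{EmitDefaults: true}
	s, _ := plain.MarshalToString(&pb.Simple3{})        // {}
	d, _ := withDefaults.MarshalToString(&pb.Simple3{}) // {"dub":0}
	fmt.Println(s, d)

	// Enums render as names by default and as numbers with EnumsAsInts.
	asInts := jsonpb.Marshaler{EnumsAsInts: true}
	w := &pb.Widget{Color: pb.Widget_BLUE.Enum()}
	name, _ := plain.MarshalToString(w) // {"color":"BLUE"}
	num, _ := asInts.MarshalToString(w) // {"color":2}
	fmt.Println(name, num)
}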
+ +package jsonpb + +import ( + "bytes" + "encoding/json" + "io" + "reflect" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + anypb "github.com/golang/protobuf/ptypes/any" + durpb "github.com/golang/protobuf/ptypes/duration" + stpb "github.com/golang/protobuf/ptypes/struct" + tspb "github.com/golang/protobuf/ptypes/timestamp" + wpb "github.com/golang/protobuf/ptypes/wrappers" +) + +var ( + marshaler = Marshaler{} + + marshalerAllOptions = Marshaler{ + Indent: " ", + } + + simpleObject = &pb.Simple{ + OInt32: proto.Int32(-32), + OInt64: proto.Int64(-6400000000), + OUint32: proto.Uint32(32), + OUint64: proto.Uint64(6400000000), + OSint32: proto.Int32(-13), + OSint64: proto.Int64(-2600000000), + OFloat: proto.Float32(3.14), + ODouble: proto.Float64(6.02214179e23), + OBool: proto.Bool(true), + OString: proto.String("hello \"there\""), + OBytes: []byte("beep boop"), + } + + simpleObjectJSON = `{` + + `"oBool":true,` + + `"oInt32":-32,` + + `"oInt64":"-6400000000",` + + `"oUint32":32,` + + `"oUint64":"6400000000",` + + `"oSint32":-13,` + + `"oSint64":"-2600000000",` + + `"oFloat":3.14,` + + `"oDouble":6.02214179e+23,` + + `"oString":"hello \"there\"",` + + `"oBytes":"YmVlcCBib29w"` + + `}` + + simpleObjectPrettyJSON = `{ + "oBool": true, + "oInt32": -32, + "oInt64": "-6400000000", + "oUint32": 32, + "oUint64": "6400000000", + "oSint32": -13, + "oSint64": "-2600000000", + "oFloat": 3.14, + "oDouble": 6.02214179e+23, + "oString": "hello \"there\"", + "oBytes": "YmVlcCBib29w" +}` + + repeatsObject = &pb.Repeats{ + RBool: []bool{true, false, true}, + RInt32: []int32{-3, -4, -5}, + RInt64: []int64{-123456789, -987654321}, + RUint32: []uint32{1, 2, 3}, + RUint64: []uint64{6789012345, 3456789012}, + RSint32: []int32{-1, -2, -3}, + RSint64: []int64{-6789012345, -3456789012}, + RFloat: []float32{3.14, 6.28}, + RDouble: []float64{299792458, 6.62606957e-34}, + RString: []string{"happy", "days"}, + RBytes: [][]byte{[]byte("skittles"), []byte("m&m's")}, + } + + repeatsObjectJSON = `{` + + `"rBool":[true,false,true],` + + `"rInt32":[-3,-4,-5],` + + `"rInt64":["-123456789","-987654321"],` + + `"rUint32":[1,2,3],` + + `"rUint64":["6789012345","3456789012"],` + + `"rSint32":[-1,-2,-3],` + + `"rSint64":["-6789012345","-3456789012"],` + + `"rFloat":[3.14,6.28],` + + `"rDouble":[2.99792458e+08,6.62606957e-34],` + + `"rString":["happy","days"],` + + `"rBytes":["c2tpdHRsZXM=","bSZtJ3M="]` + + `}` + + repeatsObjectPrettyJSON = `{ + "rBool": [ + true, + false, + true + ], + "rInt32": [ + -3, + -4, + -5 + ], + "rInt64": [ + "-123456789", + "-987654321" + ], + "rUint32": [ + 1, + 2, + 3 + ], + "rUint64": [ + "6789012345", + "3456789012" + ], + "rSint32": [ + -1, + -2, + -3 + ], + "rSint64": [ + "-6789012345", + "-3456789012" + ], + "rFloat": [ + 3.14, + 6.28 + ], + "rDouble": [ + 2.99792458e+08, + 6.62606957e-34 + ], + "rString": [ + "happy", + "days" + ], + "rBytes": [ + "c2tpdHRsZXM=", + "bSZtJ3M=" + ] +}` + + innerSimple = &pb.Simple{OInt32: proto.Int32(-32)} + innerSimple2 = &pb.Simple{OInt64: proto.Int64(25)} + innerRepeats = &pb.Repeats{RString: []string{"roses", "red"}} + innerRepeats2 = &pb.Repeats{RString: []string{"violets", "blue"}} + complexObject = &pb.Widget{ + Color: pb.Widget_GREEN.Enum(), + RColor: []pb.Widget_Color{pb.Widget_RED, pb.Widget_GREEN, pb.Widget_BLUE}, + Simple: innerSimple, + RSimple: []*pb.Simple{innerSimple, innerSimple2}, + Repeats: innerRepeats, + 
RRepeats: []*pb.Repeats{innerRepeats, innerRepeats2}, + } + + complexObjectJSON = `{"color":"GREEN",` + + `"rColor":["RED","GREEN","BLUE"],` + + `"simple":{"oInt32":-32},` + + `"rSimple":[{"oInt32":-32},{"oInt64":"25"}],` + + `"repeats":{"rString":["roses","red"]},` + + `"rRepeats":[{"rString":["roses","red"]},{"rString":["violets","blue"]}]` + + `}` + + complexObjectPrettyJSON = `{ + "color": "GREEN", + "rColor": [ + "RED", + "GREEN", + "BLUE" + ], + "simple": { + "oInt32": -32 + }, + "rSimple": [ + { + "oInt32": -32 + }, + { + "oInt64": "25" + } + ], + "repeats": { + "rString": [ + "roses", + "red" + ] + }, + "rRepeats": [ + { + "rString": [ + "roses", + "red" + ] + }, + { + "rString": [ + "violets", + "blue" + ] + } + ] +}` + + colorPrettyJSON = `{ + "color": 2 +}` + + colorListPrettyJSON = `{ + "color": 1000, + "rColor": [ + "RED" + ] +}` + + nummyPrettyJSON = `{ + "nummy": { + "1": 2, + "3": 4 + } +}` + + objjyPrettyJSON = `{ + "objjy": { + "1": { + "dub": 1 + } + } +}` + realNumber = &pb.Real{Value: proto.Float64(3.14159265359)} + realNumberName = "Pi" + complexNumber = &pb.Complex{Imaginary: proto.Float64(0.5772156649)} + realNumberJSON = `{` + + `"value":3.14159265359,` + + `"[jsonpb.Complex.real_extension]":{"imaginary":0.5772156649},` + + `"[jsonpb.name]":"Pi"` + + `}` + + anySimple = &pb.KnownTypes{ + An: &anypb.Any{ + TypeUrl: "something.example.com/jsonpb.Simple", + Value: []byte{ + // &pb.Simple{OBool:true} + 1 << 3, 1, + }, + }, + } + anySimpleJSON = `{"an":{"@type":"something.example.com/jsonpb.Simple","oBool":true}}` + anySimplePrettyJSON = `{ + "an": { + "@type": "something.example.com/jsonpb.Simple", + "oBool": true + } +}` + + anyWellKnown = &pb.KnownTypes{ + An: &anypb.Any{ + TypeUrl: "type.googleapis.com/google.protobuf.Duration", + Value: []byte{ + // &durpb.Duration{Seconds: 1, Nanos: 212000000 } + 1 << 3, 1, // seconds + 2 << 3, 0x80, 0xba, 0x8b, 0x65, // nanos + }, + }, + } + anyWellKnownJSON = `{"an":{"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}}` + anyWellKnownPrettyJSON = `{ + "an": { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } +}` +) + +func init() { + if err := proto.SetExtension(realNumber, pb.E_Name, &realNumberName); err != nil { + panic(err) + } + if err := proto.SetExtension(realNumber, pb.E_Complex_RealExtension, complexNumber); err != nil { + panic(err) + } +} + +var marshalingTests = []struct { + desc string + marshaler Marshaler + pb proto.Message + json string +}{ + {"simple flat object", marshaler, simpleObject, simpleObjectJSON}, + {"simple pretty object", marshalerAllOptions, simpleObject, simpleObjectPrettyJSON}, + {"repeated fields flat object", marshaler, repeatsObject, repeatsObjectJSON}, + {"repeated fields pretty object", marshalerAllOptions, repeatsObject, repeatsObjectPrettyJSON}, + {"nested message/enum flat object", marshaler, complexObject, complexObjectJSON}, + {"nested message/enum pretty object", marshalerAllOptions, complexObject, complexObjectPrettyJSON}, + {"enum-string flat object", Marshaler{}, + &pb.Widget{Color: pb.Widget_BLUE.Enum()}, `{"color":"BLUE"}`}, + {"enum-value pretty object", Marshaler{EnumsAsInts: true, Indent: " "}, + &pb.Widget{Color: pb.Widget_BLUE.Enum()}, colorPrettyJSON}, + {"unknown enum value object", marshalerAllOptions, + &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}, colorListPrettyJSON}, + {"repeated proto3 enum", Marshaler{}, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + 
proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}, + `{"rFunny":["PUNS","SLAPSTICK"]}`}, + {"repeated proto3 enum as int", Marshaler{EnumsAsInts: true}, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}, + `{"rFunny":[1,2]}`}, + {"empty value", marshaler, &pb.Simple3{}, `{}`}, + {"empty value emitted", Marshaler{EmitDefaults: true}, &pb.Simple3{}, `{"dub":0}`}, + {"map", marshaler, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, `{"nummy":{"1":2,"3":4}}`}, + {"map", marshalerAllOptions, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, nummyPrettyJSON}, + {"map", marshaler, + &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}, + `{"strry":{"\"one\"":"two","three":"four"}}`}, + {"map", marshaler, + &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: &pb.Simple3{Dub: 1}}}, `{"objjy":{"1":{"dub":1}}}`}, + {"map", marshalerAllOptions, + &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: &pb.Simple3{Dub: 1}}}, objjyPrettyJSON}, + {"map", marshaler, &pb.Mappy{Buggy: map[int64]string{1234: "yup"}}, + `{"buggy":{"1234":"yup"}}`}, + {"map", marshaler, &pb.Mappy{Booly: map[bool]bool{false: true}}, `{"booly":{"false":true}}`}, + // TODO: This is broken. + //{"map", marshaler, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":"ROMAN"}`}, + {"map", Marshaler{EnumsAsInts: true}, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":2}}`}, + {"map", marshaler, &pb.Mappy{S32Booly: map[int32]bool{1: true, 3: false, 10: true, 12: false}}, `{"s32booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"map", marshaler, &pb.Mappy{S64Booly: map[int64]bool{1: true, 3: false, 10: true, 12: false}}, `{"s64booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"map", marshaler, &pb.Mappy{U32Booly: map[uint32]bool{1: true, 3: false, 10: true, 12: false}}, `{"u32booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"map", marshaler, &pb.Mappy{U64Booly: map[uint64]bool{1: true, 3: false, 10: true, 12: false}}, `{"u64booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"proto2 map", marshaler, &pb.Maps{MInt64Str: map[int64]string{213: "cat"}}, + `{"mInt64Str":{"213":"cat"}}`}, + {"proto2 map", marshaler, + &pb.Maps{MBoolSimple: map[bool]*pb.Simple{true: &pb.Simple{OInt32: proto.Int32(1)}}}, + `{"mBoolSimple":{"true":{"oInt32":1}}}`}, + {"oneof, not set", marshaler, &pb.MsgWithOneof{}, `{}`}, + {"oneof, set", marshaler, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Title{"Grand Poobah"}}, `{"title":"Grand Poobah"}`}, + {"force orig_name", Marshaler{OrigName: true}, &pb.Simple{OInt32: proto.Int32(4)}, + `{"o_int32":4}`}, + {"proto2 extension", marshaler, realNumber, realNumberJSON}, + {"Any with message", marshaler, anySimple, anySimpleJSON}, + {"Any with message and indent", marshalerAllOptions, anySimple, anySimplePrettyJSON}, + {"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON}, + {"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON}, + {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3.000s"}`}, + {"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{ + Fields: map[string]*stpb.Value{ + "one": &stpb.Value{Kind: &stpb.Value_StringValue{"loneliest number"}}, + "two": &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}, + }, + }}, `{"st":{"one":"loneliest number","two":null}}`}, + {"Timestamp", marshaler, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}, 
`{"ts":"2014-05-13T16:53:20.021Z"}`}, + + {"DoubleValue", marshaler, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}, `{"dbl":1.2}`}, + {"FloatValue", marshaler, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}, `{"flt":1.2}`}, + {"Int64Value", marshaler, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}, `{"i64":"-3"}`}, + {"UInt64Value", marshaler, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}, `{"u64":"3"}`}, + {"Int32Value", marshaler, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}, `{"i32":-4}`}, + {"UInt32Value", marshaler, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}, `{"u32":4}`}, + {"BoolValue", marshaler, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}, `{"bool":true}`}, + {"StringValue", marshaler, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}, `{"str":"plush"}`}, + {"BytesValue", marshaler, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}, `{"bytes":"d293"}`}, +} + +func TestMarshaling(t *testing.T) { + for _, tt := range marshalingTests { + json, err := tt.marshaler.MarshalToString(tt.pb) + if err != nil { + t.Errorf("%s: marshaling error: %v", tt.desc, err) + } else if tt.json != json { + t.Errorf("%s: got [%v] want [%v]", tt.desc, json, tt.json) + } + } +} + +var unmarshalingTests = []struct { + desc string + unmarshaler Unmarshaler + json string + pb proto.Message +}{ + {"simple flat object", Unmarshaler{}, simpleObjectJSON, simpleObject}, + {"simple pretty object", Unmarshaler{}, simpleObjectPrettyJSON, simpleObject}, + {"repeated fields flat object", Unmarshaler{}, repeatsObjectJSON, repeatsObject}, + {"repeated fields pretty object", Unmarshaler{}, repeatsObjectPrettyJSON, repeatsObject}, + {"nested message/enum flat object", Unmarshaler{}, complexObjectJSON, complexObject}, + {"nested message/enum pretty object", Unmarshaler{}, complexObjectPrettyJSON, complexObject}, + {"enum-string object", Unmarshaler{}, `{"color":"BLUE"}`, &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, + {"enum-value object", Unmarshaler{}, "{\n \"color\": 2\n}", &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, + {"unknown field with allowed option", Unmarshaler{AllowUnknownFields: true}, `{"unknown": "foo"}`, new(pb.Simple)}, + {"proto3 enum string", Unmarshaler{}, `{"hilarity":"PUNS"}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 enum value", Unmarshaler{}, `{"hilarity":1}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"unknown enum value object", + Unmarshaler{}, + "{\n \"color\": 1000,\n \"r_color\": [\n \"RED\"\n ]\n}", + &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}}, + {"repeated proto3 enum", Unmarshaler{}, `{"rFunny":["PUNS","SLAPSTICK"]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}}, + {"repeated proto3 enum as int", Unmarshaler{}, `{"rFunny":[1,2]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}}, + {"repeated proto3 enum as mix of strings and ints", Unmarshaler{}, `{"rFunny":["PUNS",2]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}}, + {"unquoted int64 object", Unmarshaler{}, `{"oInt64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}}, + {"unquoted uint64 object", Unmarshaler{}, `{"oUint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}}, + {"map", Unmarshaler{}, `{"nummy":{"1":2,"3":4}}`, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}}, + {"map", Unmarshaler{}, 
`{"strry":{"\"one\"":"two","three":"four"}}`, &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}}, + {"map", Unmarshaler{}, `{"objjy":{"1":{"dub":1}}}`, &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: &pb.Simple3{Dub: 1}}}}, + // TODO: This is broken. + //{"map", Unmarshaler{}, `{"enumy":{"XIV":"ROMAN"}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, + {"map", Unmarshaler{}, `{"enumy":{"XIV":2}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, + {"oneof", Unmarshaler{}, `{"salary":31000}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{31000}}}, + {"oneof spec name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, + {"oneof orig_name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, + {"oneof spec name2", Unmarshaler{}, `{"homeAddress":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}}, + {"oneof orig_name2", Unmarshaler{}, `{"home_address":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}}, + {"orig_name input", Unmarshaler{}, `{"o_bool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, + {"camelName input", Unmarshaler{}, `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, + + {"Duration", Unmarshaler{}, `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}}, + {"Timestamp", Unmarshaler{}, `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}}, + + {"DoubleValue", Unmarshaler{}, `{"dbl":1.2}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}}, + {"FloatValue", Unmarshaler{}, `{"flt":1.2}`, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}}, + {"Int64Value", Unmarshaler{}, `{"i64":"-3"}`, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}}, + {"UInt64Value", Unmarshaler{}, `{"u64":"3"}`, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}}, + {"Int32Value", Unmarshaler{}, `{"i32":-4}`, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}}, + {"UInt32Value", Unmarshaler{}, `{"u32":4}`, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}}, + {"BoolValue", Unmarshaler{}, `{"bool":true}`, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}}, + {"StringValue", Unmarshaler{}, `{"str":"plush"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}}, + {"BytesValue", Unmarshaler{}, `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}}, + // `null` is also a permissible value. Let's just test one. + {"null DoubleValue", Unmarshaler{}, `{"dbl":null}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{}}}, +} + +func TestUnmarshaling(t *testing.T) { + for _, tt := range unmarshalingTests { + // Make a new instance of the type of our expected object. + p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message) + + err := tt.unmarshaler.Unmarshal(strings.NewReader(tt.json), p) + if err != nil { + t.Errorf("%s: %v", tt.desc, err) + continue + } + + // For easier diffs, compare text strings of the protos. + exp := proto.MarshalTextString(tt.pb) + act := proto.MarshalTextString(p) + if string(exp) != string(act) { + t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp) + } + } +} + +func TestUnmarshalNext(t *testing.T) { + // We only need to check against a few, not all of them. + tests := unmarshalingTests[:5] + + // Create a buffer with many concatenated JSON objects. 
+ var b bytes.Buffer + for _, tt := range tests { + b.WriteString(tt.json) + } + + dec := json.NewDecoder(&b) + for _, tt := range tests { + // Make a new instance of the type of our expected object. + p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message) + + err := tt.unmarshaler.UnmarshalNext(dec, p) + if err != nil { + t.Errorf("%s: %v", tt.desc, err) + continue + } + + // For easier diffs, compare text strings of the protos. + exp := proto.MarshalTextString(tt.pb) + act := proto.MarshalTextString(p) + if string(exp) != string(act) { + t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp) + } + } + + p := &pb.Simple{} + err := new(Unmarshaler).UnmarshalNext(dec, p) + if err != io.EOF { + t.Errorf("eof: got %v, expected io.EOF", err) + } +} + +var unmarshalingShouldError = []struct { + desc string + in string + pb proto.Message +}{ + {"a value", "666", new(pb.Simple)}, + {"gibberish", "{adskja123;l23=-=", new(pb.Simple)}, + {"unknown field", `{"unknown": "foo"}`, new(pb.Simple)}, + {"unknown enum name", `{"hilarity":"DAVE"}`, new(proto3pb.Message)}, +} + +func TestUnmarshalingBadInput(t *testing.T) { + for _, tt := range unmarshalingShouldError { + err := UnmarshalString(tt.in, tt.pb) + if err == nil { + t.Errorf("an error was expected when parsing %q instead of an object", tt.desc) + } + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile new file mode 100644 index 00000000000..eeda8ae531a --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile @@ -0,0 +1,33 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2015 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
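The well-known-type cases above reduce to a small set of JSON shapes; a sketch of the Duration/Timestamp round trip follows, with the same caveat that the vendored import paths and driver scaffolding are assumptions. The expected outputs in the comments come from the test table above.

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
	tspb "github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
	m := jsonpb.Marshaler{}
	d, _ := m.MarshalToString(&pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}})
	t, _ := m.MarshalToString(&pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}})
	fmt.Println(d) // {"dur":"3.000s"}
	fmt.Println(t) // {"ts":"2014-05-13T16:53:20.021Z"}

	// The Unmarshaler accepts the same representations back.
	rt := &pb.KnownTypes{}
	if err := jsonpb.UnmarshalString(`{"dur":"3.000s","ts":"2014-05-13T16:53:20.021Z"}`, rt); err != nil {
		panic(err)
	}
	fmt.Println(rt.GetDur().Seconds, rt.GetTs().Nanos) // 3 21000000
}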
+ +regenerate: + protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers:. *.proto diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go new file mode 100644 index 00000000000..101db7a0aca --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. +// source: more_test_objects.proto +// DO NOT EDIT! + +/* +Package jsonpb is a generated protocol buffer package. + +It is generated from these files: + more_test_objects.proto + test_objects.proto + +It has these top-level messages: + Simple3 + Mappy + Simple + Repeats + Widget + Maps + MsgWithOneof + Real + Complex + KnownTypes +*/ +package jsonpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Numeral int32 + +const ( + Numeral_UNKNOWN Numeral = 0 + Numeral_ARABIC Numeral = 1 + Numeral_ROMAN Numeral = 2 +) + +var Numeral_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARABIC", + 2: "ROMAN", +} +var Numeral_value = map[string]int32{ + "UNKNOWN": 0, + "ARABIC": 1, + "ROMAN": 2, +} + +func (x Numeral) String() string { + return proto.EnumName(Numeral_name, int32(x)) +} +func (Numeral) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type Simple3 struct { + Dub float64 `protobuf:"fixed64,1,opt,name=dub" json:"dub,omitempty"` +} + +func (m *Simple3) Reset() { *m = Simple3{} } +func (m *Simple3) String() string { return proto.CompactTextString(m) } +func (*Simple3) ProtoMessage() {} +func (*Simple3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type Mappy struct { + Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"` + S32Booly map[int32]bool 
`protobuf:"bytes,7,rep,name=s32booly" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *Mappy) Reset() { *m = Mappy{} } +func (m *Mappy) String() string { return proto.CompactTextString(m) } +func (*Mappy) ProtoMessage() {} +func (*Mappy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Mappy) GetNummy() map[int64]int32 { + if m != nil { + return m.Nummy + } + return nil +} + +func (m *Mappy) GetStrry() map[string]string { + if m != nil { + return m.Strry + } + return nil +} + +func (m *Mappy) GetObjjy() map[int32]*Simple3 { + if m != nil { + return m.Objjy + } + return nil +} + +func (m *Mappy) GetBuggy() map[int64]string { + if m != nil { + return m.Buggy + } + return nil +} + +func (m *Mappy) GetBooly() map[bool]bool { + if m != nil { + return m.Booly + } + return nil +} + +func (m *Mappy) GetEnumy() map[string]Numeral { + if m != nil { + return m.Enumy + } + return nil +} + +func (m *Mappy) GetS32Booly() map[int32]bool { + if m != nil { + return m.S32Booly + } + return nil +} + +func (m *Mappy) GetS64Booly() map[int64]bool { + if m != nil { + return m.S64Booly + } + return nil +} + +func (m *Mappy) GetU32Booly() map[uint32]bool { + if m != nil { + return m.U32Booly + } + return nil +} + +func (m *Mappy) GetU64Booly() map[uint64]bool { + if m != nil { + return m.U64Booly + } + return nil +} + +func init() { + proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3") + proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy") + proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value) +} + +func init() { proto.RegisterFile("more_test_objects.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 442 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x94, 0xcf, 0xab, 0xd3, 0x40, + 0x10, 0xc7, 0x4d, 0xf3, 0xf2, 0x6b, 0xca, 0xd3, 0xb0, 0x08, 0x06, 0xdf, 0x45, 0x1e, 0x08, 0x45, + 0x30, 0x87, 0x56, 0xf4, 0xa1, 0xa7, 0x56, 0x7a, 0x28, 0xd2, 0x14, 0x52, 0x8a, 0xc7, 0xd2, 0xe8, + 0x52, 0xac, 0x49, 0x13, 0xf2, 0x43, 0xd8, 0x3f, 0x5e, 0x30, 0xb3, 0xd9, 0x34, 0x9b, 0xb0, 0xa5, + 0xde, 0x36, 0x7c, 0x3f, 0x9f, 0xce, 0xec, 0xce, 0x50, 0x78, 0x95, 0xa4, 0x39, 0xdd, 0x97, 0xb4, + 0x28, 0xf7, 0x69, 0x74, 0xa2, 0x3f, 0xca, 0xc2, 0xcf, 0xf2, 0xb4, 0x4c, 0x89, 0x79, 0x2a, 0xd2, + 0x73, 0x16, 0x3d, 0x3e, 0x80, 0xb5, 0xfd, 0x95, 0x64, 0x31, 0x9d, 0x11, 0x17, 0xf4, 0x9f, 0x55, + 0xe4, 0x69, 0x6f, 0xb4, 0x89, 0x16, 0xe2, 0xf1, 0xf1, 0xaf, 0x0d, 0xc6, 0xfa, 0x90, 0x65, 0x8c, + 0xf8, 0x60, 0x9c, 0xab, 0x24, 0x61, 0x75, 0xaa, 0x4f, 0xc6, 0x53, 0xcf, 0x6f, 0x74, 0x9f, 0xa7, + 0x7e, 0x80, 0xd1, 0xf2, 0x5c, 0xe6, 0x2c, 0x6c, 0x30, 0xe4, 0x8b, 0x32, 0xcf, 0x99, 0x37, 0x52, + 0xf1, 0x5b, 0x8c, 0x04, 0xcf, 0x31, 0xe4, 0xeb, 0xfe, 0x4e, 0xcc, 0xd3, 0x55, 0xfc, 0x06, 0x23, + 0xc1, 0x73, 0x0c, 0xf9, 0xa8, 0x3a, 0x1e, 0x99, 0x77, 0xa7, 0xe2, 0x17, 0x18, 0x09, 0x9e, 0x63, + 0x9c, 0x4f, 0xd3, 0x98, 0x79, 0x86, 0x92, 0xc7, 0xa8, 0xe5, 0xf1, 0x8c, 0x3c, 0xad, 0x6f, 0xc2, + 
0x3c, 0x53, 0xc5, 0x2f, 0x31, 0x12, 0x3c, 0xc7, 0xc8, 0x27, 0xb0, 0x8b, 0xd9, 0xb4, 0x29, 0x61, + 0x71, 0xe5, 0x61, 0x70, 0x65, 0x91, 0x36, 0xd6, 0x05, 0xe6, 0xe2, 0xc7, 0x0f, 0x8d, 0x68, 0x2b, + 0x45, 0x91, 0xb6, 0xa2, 0xf8, 0x44, 0xb1, 0x6a, 0x2b, 0x3a, 0x2a, 0x71, 0xd7, 0xaf, 0x58, 0x49, + 0x15, 0xab, 0xb6, 0x22, 0x28, 0xc5, 0x7e, 0xc5, 0x16, 0x7e, 0xfd, 0x04, 0xd0, 0x0d, 0x1a, 0xb7, + 0xe5, 0x37, 0x65, 0x7c, 0x5b, 0xf4, 0x10, 0x8f, 0xe4, 0x25, 0x18, 0x7f, 0x0e, 0x71, 0x45, 0xeb, + 0x99, 0x6b, 0x13, 0x23, 0x6c, 0x3e, 0x3e, 0x8f, 0x9e, 0x34, 0x34, 0xbb, 0x91, 0xcb, 0xa6, 0xa3, + 0x30, 0x1d, 0xd9, 0x5c, 0x01, 0x74, 0xc3, 0x97, 0x4d, 0xa3, 0x31, 0xdf, 0xca, 0xe6, 0x78, 0xfa, + 0xa2, 0xbd, 0x89, 0xd8, 0xe9, 0x41, 0x13, 0xdd, 0x5e, 0xdc, 0x6a, 0xdf, 0x19, 0x9a, 0x97, 0x07, + 0x91, 0x4d, 0x5b, 0x61, 0xda, 0x83, 0xf6, 0xbb, 0x5d, 0x51, 0x5c, 0xbc, 0xd7, 0xfe, 0xf3, 0xae, + 0xfd, 0xfa, 0x9d, 0x69, 0x7e, 0x88, 0xe5, 0x9f, 0xfa, 0x02, 0xf7, 0xbd, 0x1d, 0x52, 0x3c, 0xc6, + 0xf5, 0x3e, 0x50, 0x96, 0xa7, 0x7a, 0xeb, 0xfa, 0x43, 0x79, 0x77, 0xad, 0xf2, 0xfd, 0xff, 0xc8, + 0xd7, 0x2a, 0xdf, 0xdd, 0x90, 0xdf, 0xbd, 0x07, 0x4b, 0xbc, 0x04, 0x19, 0x83, 0xb5, 0x0b, 0xbe, + 0x05, 0x9b, 0xef, 0x81, 0xfb, 0x8c, 0x00, 0x98, 0xf3, 0x70, 0xbe, 0x58, 0x7d, 0x75, 0x35, 0xe2, + 0x80, 0x11, 0x6e, 0xd6, 0xf3, 0xc0, 0x1d, 0x45, 0x26, 0xff, 0x6b, 0x9b, 0xfd, 0x0b, 0x00, 0x00, + 0xff, 0xff, 0xa2, 0x4b, 0xe1, 0x77, 0xf5, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto new file mode 100644 index 00000000000..43b440e2dd5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto @@ -0,0 +1,57 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
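The Mappy test message (generated Go code above, proto source below) is what exercises the numeric map-key ordering added to jsonpb.go earlier in this patch: integer keys are emitted as quoted strings but sorted by numeric value rather than lexicographically. A small sketch, again assuming the vendored import paths; the expected output matches the corresponding test case.

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto"
)

func main() {
	m := jsonpb.Marshaler{}
	out, _ := m.MarshalToString(&pb.Mappy{
		S32Booly: map[int32]bool{1: true, 3: false, 10: true, 12: false},
	})
	// Keys are ordered numerically ("1","3","10","12"),
	// not lexicographically ("1","10","12","3").
	fmt.Println(out) // {"s32booly":{"1":true,"3":false,"10":true,"12":false}}
}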
+
+syntax = "proto3";
+
+package jsonpb;
+
+message Simple3 {
+  double dub = 1;
+}
+
+enum Numeral {
+  UNKNOWN = 0;
+  ARABIC = 1;
+  ROMAN = 2;
+}
+
+message Mappy {
+  map<int64, int32> nummy = 1;
+  map<string, string> strry = 2;
+  map<int32, Simple3> objjy = 3;
+  map<int64, string> buggy = 4;
+  map<bool, bool> booly = 5;
+  map<string, Numeral> enumy = 6;
+  map<int32, bool> s32booly = 7;
+  map<int64, bool> s64booly = 8;
+  map<uint32, bool> u32booly = 9;
+  map<uint64, bool> u64booly = 10;
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
new file mode 100644
index 00000000000..b0b5b045299
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
@@ -0,0 +1,768 @@
+// Code generated by protoc-gen-go.
+// source: test_objects.proto
+// DO NOT EDIT!
+
+package jsonpb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/ptypes/any"
+import google_protobuf1 "github.com/golang/protobuf/ptypes/duration"
+import google_protobuf2 "github.com/golang/protobuf/ptypes/struct"
+import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp"
+import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Widget_Color int32
+
+const (
+	Widget_RED   Widget_Color = 0
+	Widget_GREEN Widget_Color = 1
+	Widget_BLUE  Widget_Color = 2
+)
+
+var Widget_Color_name = map[int32]string{
+	0: "RED",
+	1: "GREEN",
+	2: "BLUE",
+}
+var Widget_Color_value = map[string]int32{
+	"RED":   0,
+	"GREEN": 1,
+	"BLUE":  2,
+}
+
+func (x Widget_Color) Enum() *Widget_Color {
+	p := new(Widget_Color)
+	*p = x
+	return p
+}
+func (x Widget_Color) String() string {
+	return proto.EnumName(Widget_Color_name, int32(x))
+}
+func (x *Widget_Color) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Widget_Color_value, data, "Widget_Color")
+	if err != nil {
+		return err
+	}
+	*x = Widget_Color(value)
+	return nil
+}
+func (Widget_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{2, 0} }
+
+// Test message for holding primitive types.
+type Simple struct { + OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"` + OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"` + OInt64 *int64 `protobuf:"varint,3,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"` + OUint32 *uint32 `protobuf:"varint,4,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"` + OUint64 *uint64 `protobuf:"varint,5,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"` + OSint32 *int32 `protobuf:"zigzag32,6,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"` + OSint64 *int64 `protobuf:"zigzag64,7,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"` + OFloat *float32 `protobuf:"fixed32,8,opt,name=o_float,json=oFloat" json:"o_float,omitempty"` + ODouble *float64 `protobuf:"fixed64,9,opt,name=o_double,json=oDouble" json:"o_double,omitempty"` + OString *string `protobuf:"bytes,10,opt,name=o_string,json=oString" json:"o_string,omitempty"` + OBytes []byte `protobuf:"bytes,11,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Simple) Reset() { *m = Simple{} } +func (m *Simple) String() string { return proto.CompactTextString(m) } +func (*Simple) ProtoMessage() {} +func (*Simple) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Simple) GetOBool() bool { + if m != nil && m.OBool != nil { + return *m.OBool + } + return false +} + +func (m *Simple) GetOInt32() int32 { + if m != nil && m.OInt32 != nil { + return *m.OInt32 + } + return 0 +} + +func (m *Simple) GetOInt64() int64 { + if m != nil && m.OInt64 != nil { + return *m.OInt64 + } + return 0 +} + +func (m *Simple) GetOUint32() uint32 { + if m != nil && m.OUint32 != nil { + return *m.OUint32 + } + return 0 +} + +func (m *Simple) GetOUint64() uint64 { + if m != nil && m.OUint64 != nil { + return *m.OUint64 + } + return 0 +} + +func (m *Simple) GetOSint32() int32 { + if m != nil && m.OSint32 != nil { + return *m.OSint32 + } + return 0 +} + +func (m *Simple) GetOSint64() int64 { + if m != nil && m.OSint64 != nil { + return *m.OSint64 + } + return 0 +} + +func (m *Simple) GetOFloat() float32 { + if m != nil && m.OFloat != nil { + return *m.OFloat + } + return 0 +} + +func (m *Simple) GetODouble() float64 { + if m != nil && m.ODouble != nil { + return *m.ODouble + } + return 0 +} + +func (m *Simple) GetOString() string { + if m != nil && m.OString != nil { + return *m.OString + } + return "" +} + +func (m *Simple) GetOBytes() []byte { + if m != nil { + return m.OBytes + } + return nil +} + +// Test message for holding repeated primitives. 
+type Repeats struct { + RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"` + RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"` + RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"` + RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"` + RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"` + RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"` + RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"` + RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"` + RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"` + RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"` + RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Repeats) Reset() { *m = Repeats{} } +func (m *Repeats) String() string { return proto.CompactTextString(m) } +func (*Repeats) ProtoMessage() {} +func (*Repeats) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *Repeats) GetRBool() []bool { + if m != nil { + return m.RBool + } + return nil +} + +func (m *Repeats) GetRInt32() []int32 { + if m != nil { + return m.RInt32 + } + return nil +} + +func (m *Repeats) GetRInt64() []int64 { + if m != nil { + return m.RInt64 + } + return nil +} + +func (m *Repeats) GetRUint32() []uint32 { + if m != nil { + return m.RUint32 + } + return nil +} + +func (m *Repeats) GetRUint64() []uint64 { + if m != nil { + return m.RUint64 + } + return nil +} + +func (m *Repeats) GetRSint32() []int32 { + if m != nil { + return m.RSint32 + } + return nil +} + +func (m *Repeats) GetRSint64() []int64 { + if m != nil { + return m.RSint64 + } + return nil +} + +func (m *Repeats) GetRFloat() []float32 { + if m != nil { + return m.RFloat + } + return nil +} + +func (m *Repeats) GetRDouble() []float64 { + if m != nil { + return m.RDouble + } + return nil +} + +func (m *Repeats) GetRString() []string { + if m != nil { + return m.RString + } + return nil +} + +func (m *Repeats) GetRBytes() [][]byte { + if m != nil { + return m.RBytes + } + return nil +} + +// Test message for holding enums and nested messages. 
+type Widget struct { + Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"` + RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` + Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"` + RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"` + Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"` + RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Widget) Reset() { *m = Widget{} } +func (m *Widget) String() string { return proto.CompactTextString(m) } +func (*Widget) ProtoMessage() {} +func (*Widget) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *Widget) GetColor() Widget_Color { + if m != nil && m.Color != nil { + return *m.Color + } + return Widget_RED +} + +func (m *Widget) GetRColor() []Widget_Color { + if m != nil { + return m.RColor + } + return nil +} + +func (m *Widget) GetSimple() *Simple { + if m != nil { + return m.Simple + } + return nil +} + +func (m *Widget) GetRSimple() []*Simple { + if m != nil { + return m.RSimple + } + return nil +} + +func (m *Widget) GetRepeats() *Repeats { + if m != nil { + return m.Repeats + } + return nil +} + +func (m *Widget) GetRRepeats() []*Repeats { + if m != nil { + return m.RRepeats + } + return nil +} + +type Maps struct { + MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Maps) Reset() { *m = Maps{} } +func (m *Maps) String() string { return proto.CompactTextString(m) } +func (*Maps) ProtoMessage() {} +func (*Maps) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *Maps) GetMInt64Str() map[int64]string { + if m != nil { + return m.MInt64Str + } + return nil +} + +func (m *Maps) GetMBoolSimple() map[bool]*Simple { + if m != nil { + return m.MBoolSimple + } + return nil +} + +type MsgWithOneof struct { + // Types that are valid to be assigned to Union: + // *MsgWithOneof_Title + // *MsgWithOneof_Salary + // *MsgWithOneof_Country + // *MsgWithOneof_HomeAddress + Union isMsgWithOneof_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} } +func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) } +func (*MsgWithOneof) ProtoMessage() {} +func (*MsgWithOneof) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +type isMsgWithOneof_Union interface { + isMsgWithOneof_Union() +} + +type MsgWithOneof_Title struct { + Title string `protobuf:"bytes,1,opt,name=title,oneof"` +} +type MsgWithOneof_Salary struct { + Salary int64 `protobuf:"varint,2,opt,name=salary,oneof"` +} +type MsgWithOneof_Country struct { + Country string `protobuf:"bytes,3,opt,name=Country,json=country,oneof"` +} +type MsgWithOneof_HomeAddress struct { + HomeAddress string `protobuf:"bytes,4,opt,name=home_address,json=homeAddress,oneof"` +} + +func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Salary) 
isMsgWithOneof_Union() {} +func (*MsgWithOneof_Country) isMsgWithOneof_Union() {} +func (*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {} + +func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *MsgWithOneof) GetTitle() string { + if x, ok := m.GetUnion().(*MsgWithOneof_Title); ok { + return x.Title + } + return "" +} + +func (m *MsgWithOneof) GetSalary() int64 { + if x, ok := m.GetUnion().(*MsgWithOneof_Salary); ok { + return x.Salary + } + return 0 +} + +func (m *MsgWithOneof) GetCountry() string { + if x, ok := m.GetUnion().(*MsgWithOneof_Country); ok { + return x.Country + } + return "" +} + +func (m *MsgWithOneof) GetHomeAddress() string { + if x, ok := m.GetUnion().(*MsgWithOneof_HomeAddress); ok { + return x.HomeAddress + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{ + (*MsgWithOneof_Title)(nil), + (*MsgWithOneof_Salary)(nil), + (*MsgWithOneof_Country)(nil), + (*MsgWithOneof_HomeAddress)(nil), + } +} + +func _MsgWithOneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*MsgWithOneof) + // union + switch x := m.Union.(type) { + case *MsgWithOneof_Title: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Title) + case *MsgWithOneof_Salary: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Country) + case *MsgWithOneof_HomeAddress: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.HomeAddress) + case nil: + default: + return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x) + } + return nil +} + +func _MsgWithOneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*MsgWithOneof) + switch tag { + case 1: // union.title + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_Title{x} + return true, err + case 2: // union.salary + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &MsgWithOneof_Salary{int64(x)} + return true, err + case 3: // union.Country + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_Country{x} + return true, err + case 4: // union.home_address + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_HomeAddress{x} + return true, err + default: + return false, nil + } +} + +func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*MsgWithOneof) + // union + switch x := m.Union.(type) { + case *MsgWithOneof_Title: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Title))) + n += len(x.Title) + case *MsgWithOneof_Salary: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Country))) + n += len(x.Country) + 
case *MsgWithOneof_HomeAddress: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.HomeAddress))) + n += len(x.HomeAddress) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Real struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Real) Reset() { *m = Real{} } +func (m *Real) String() string { return proto.CompactTextString(m) } +func (*Real) ProtoMessage() {} +func (*Real) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +var extRange_Real = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Real) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Real +} + +func (m *Real) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Complex struct { + Imaginary *float64 `protobuf:"fixed64,1,opt,name=imaginary" json:"imaginary,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Complex) Reset() { *m = Complex{} } +func (m *Complex) String() string { return proto.CompactTextString(m) } +func (*Complex) ProtoMessage() {} +func (*Complex) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +var extRange_Complex = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Complex) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Complex +} + +func (m *Complex) GetImaginary() float64 { + if m != nil && m.Imaginary != nil { + return *m.Imaginary + } + return 0 +} + +var E_Complex_RealExtension = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*Complex)(nil), + Field: 123, + Name: "jsonpb.Complex.real_extension", + Tag: "bytes,123,opt,name=real_extension,json=realExtension", +} + +type KnownTypes struct { + An *google_protobuf.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"` + Dur *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"` + St *google_protobuf2.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"` + Ts *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"` + Dbl *google_protobuf4.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"` + Flt *google_protobuf4.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"` + I64 *google_protobuf4.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"` + U64 *google_protobuf4.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"` + I32 *google_protobuf4.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"` + U32 *google_protobuf4.UInt32Value `protobuf:"bytes,8,opt,name=u32" json:"u32,omitempty"` + Bool *google_protobuf4.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"` + Str *google_protobuf4.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"` + Bytes *google_protobuf4.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *KnownTypes) Reset() { *m = KnownTypes{} } +func (m *KnownTypes) String() string { return proto.CompactTextString(m) } +func (*KnownTypes) ProtoMessage() {} +func (*KnownTypes) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *KnownTypes) GetAn() *google_protobuf.Any { + if m != nil { + return m.An + } + return nil +} + +func (m *KnownTypes) GetDur() *google_protobuf1.Duration { 
+ if m != nil { + return m.Dur + } + return nil +} + +func (m *KnownTypes) GetSt() *google_protobuf2.Struct { + if m != nil { + return m.St + } + return nil +} + +func (m *KnownTypes) GetTs() *google_protobuf3.Timestamp { + if m != nil { + return m.Ts + } + return nil +} + +func (m *KnownTypes) GetDbl() *google_protobuf4.DoubleValue { + if m != nil { + return m.Dbl + } + return nil +} + +func (m *KnownTypes) GetFlt() *google_protobuf4.FloatValue { + if m != nil { + return m.Flt + } + return nil +} + +func (m *KnownTypes) GetI64() *google_protobuf4.Int64Value { + if m != nil { + return m.I64 + } + return nil +} + +func (m *KnownTypes) GetU64() *google_protobuf4.UInt64Value { + if m != nil { + return m.U64 + } + return nil +} + +func (m *KnownTypes) GetI32() *google_protobuf4.Int32Value { + if m != nil { + return m.I32 + } + return nil +} + +func (m *KnownTypes) GetU32() *google_protobuf4.UInt32Value { + if m != nil { + return m.U32 + } + return nil +} + +func (m *KnownTypes) GetBool() *google_protobuf4.BoolValue { + if m != nil { + return m.Bool + } + return nil +} + +func (m *KnownTypes) GetStr() *google_protobuf4.StringValue { + if m != nil { + return m.Str + } + return nil +} + +func (m *KnownTypes) GetBytes() *google_protobuf4.BytesValue { + if m != nil { + return m.Bytes + } + return nil +} + +var E_Name = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*string)(nil), + Field: 124, + Name: "jsonpb.name", + Tag: "bytes,124,opt,name=name", +} + +func init() { + proto.RegisterType((*Simple)(nil), "jsonpb.Simple") + proto.RegisterType((*Repeats)(nil), "jsonpb.Repeats") + proto.RegisterType((*Widget)(nil), "jsonpb.Widget") + proto.RegisterType((*Maps)(nil), "jsonpb.Maps") + proto.RegisterType((*MsgWithOneof)(nil), "jsonpb.MsgWithOneof") + proto.RegisterType((*Real)(nil), "jsonpb.Real") + proto.RegisterType((*Complex)(nil), "jsonpb.Complex") + proto.RegisterType((*KnownTypes)(nil), "jsonpb.KnownTypes") + proto.RegisterEnum("jsonpb.Widget_Color", Widget_Color_name, Widget_Color_value) + proto.RegisterExtension(E_Complex_RealExtension) + proto.RegisterExtension(E_Name) +} + +func init() { proto.RegisterFile("test_objects.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1033 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x55, 0xdd, 0x72, 0xdb, 0x44, + 0x14, 0xae, 0x25, 0xcb, 0xb2, 0xd7, 0x49, 0x30, 0x3b, 0x29, 0x55, 0x4c, 0x80, 0x8e, 0x81, 0x02, + 0x05, 0xdc, 0xc1, 0xed, 0x74, 0x98, 0xc2, 0x4d, 0xd2, 0x98, 0x9f, 0x81, 0x94, 0x99, 0x4d, 0x43, + 0x2f, 0x3d, 0x72, 0xbc, 0x71, 0x55, 0x24, 0xad, 0x67, 0xb5, 0x22, 0xf5, 0xc0, 0x05, 0xd7, 0x5c, + 0xf3, 0x0a, 0xf0, 0x08, 0x3c, 0x11, 0x0f, 0xc2, 0x39, 0x67, 0xf5, 0xe3, 0xd8, 0x75, 0x73, 0x13, + 0x9f, 0xfd, 0xbe, 0xf3, 0xe9, 0xec, 0xb7, 0x67, 0xcf, 0x32, 0x6e, 0x64, 0x66, 0x26, 0x6a, 0xfa, + 0x42, 0x5e, 0x98, 0x6c, 0xb8, 0xd0, 0xca, 0x28, 0xde, 0x7a, 0x91, 0xa9, 0x74, 0x31, 0xed, 0x1f, + 0xcc, 0x95, 0x9a, 0xc7, 0xf2, 0x1e, 0xad, 0x4e, 0xf3, 0xcb, 0x7b, 0x61, 0xba, 0xb4, 0x94, 0xfe, + 0xbb, 0xeb, 0xd0, 0x2c, 0xd7, 0xa1, 0x89, 0x54, 0x5a, 0xe0, 0x87, 0xeb, 0x78, 0x66, 0x74, 0x7e, + 0x61, 0x0a, 0xf4, 0xbd, 0x75, 0xd4, 0x44, 0x09, 0x94, 0x11, 0x26, 0x8b, 0x6d, 0xf2, 0x57, 0x3a, + 0x5c, 0x2c, 0xa4, 0x2e, 0x2a, 0x1c, 0xfc, 0xed, 0xb0, 0xd6, 0x59, 0x94, 0x2c, 0x62, 0xc9, 0x6f, + 0xb2, 0x96, 0x9a, 0x4c, 0x95, 0x8a, 0x83, 0xc6, 0xed, 0xc6, 0xc7, 0x6d, 0xe1, 0xa9, 0x63, 0x08, + 0xf8, 0x2d, 0xe6, 0xab, 0x49, 0x94, 0x9a, 0xfb, 0xa3, 0xc0, 0x81, 0x75, 0x4f, 0xb4, 0xd4, 
0xf7, + 0x18, 0x55, 0xc0, 0xc3, 0x07, 0x81, 0x0b, 0x80, 0x6b, 0x81, 0x87, 0x0f, 0xf8, 0x01, 0x6b, 0xab, + 0x49, 0x6e, 0x53, 0x9a, 0x80, 0xec, 0x0a, 0x5f, 0x9d, 0x53, 0x58, 0x43, 0x90, 0xe4, 0x01, 0xd4, + 0x2c, 0xa0, 0x32, 0x2b, 0xb3, 0x59, 0x2d, 0x80, 0xde, 0x04, 0xe8, 0x6c, 0x25, 0x2b, 0xb3, 0x59, + 0x3e, 0x40, 0xbc, 0x80, 0x20, 0x8b, 0x8a, 0xb8, 0x8c, 0x55, 0x68, 0x82, 0x36, 0x20, 0x0e, 0x14, + 0xf1, 0x0d, 0x46, 0x36, 0x67, 0xa6, 0xf2, 0x69, 0x2c, 0x83, 0x0e, 0x20, 0x0d, 0xc8, 0x39, 0xa1, + 0xb0, 0x90, 0x33, 0x3a, 0x4a, 0xe7, 0x01, 0x03, 0xa8, 0x83, 0x72, 0x14, 0x5a, 0xb9, 0xe9, 0x12, + 0x8e, 0x32, 0xe8, 0x02, 0xb2, 0x03, 0x72, 0xc7, 0x18, 0x0d, 0xfe, 0x71, 0x98, 0x2f, 0xe4, 0x42, + 0x86, 0x26, 0x43, 0xa3, 0x74, 0x69, 0x94, 0x8b, 0x46, 0xe9, 0xd2, 0x28, 0x5d, 0x19, 0xe5, 0xa2, + 0x51, 0xba, 0x32, 0x4a, 0x57, 0x46, 0xb9, 0x68, 0x94, 0xae, 0x8c, 0xd2, 0xb5, 0x51, 0x2e, 0x1a, + 0xa5, 0x6b, 0xa3, 0x74, 0x6d, 0x94, 0x8b, 0x46, 0xe9, 0xda, 0x28, 0x5d, 0x1b, 0xe5, 0xa2, 0x51, + 0xfa, 0x6c, 0x25, 0xab, 0x32, 0xca, 0x45, 0xa3, 0x74, 0x6d, 0x94, 0xae, 0x8c, 0x72, 0xd1, 0x28, + 0x5d, 0x19, 0xa5, 0x6b, 0xa3, 0x5c, 0x34, 0x4a, 0xd7, 0x46, 0xe9, 0xda, 0x28, 0x17, 0x8d, 0xd2, + 0xb5, 0x51, 0xba, 0x32, 0xca, 0x45, 0xa3, 0xb4, 0x35, 0xea, 0x5f, 0x68, 0xa8, 0x67, 0xd1, 0x6c, + 0x2e, 0x0d, 0xbf, 0xcb, 0xbc, 0x0b, 0x15, 0x2b, 0x4d, 0xfd, 0xb4, 0x37, 0xda, 0x1f, 0xda, 0xdb, + 0x30, 0xb4, 0xf0, 0xf0, 0x31, 0x62, 0xc2, 0x52, 0xf8, 0xe7, 0xa8, 0x67, 0xd9, 0x68, 0xde, 0x36, + 0x76, 0x4b, 0xd3, 0x7f, 0x7e, 0x87, 0xb5, 0x32, 0xea, 0x5a, 0x3a, 0xc0, 0xee, 0x68, 0xaf, 0x64, + 0xdb, 0x5e, 0x16, 0x05, 0xca, 0x3f, 0xb1, 0x86, 0x10, 0x13, 0xeb, 0xdc, 0x64, 0xa2, 0x41, 0x05, + 0xd5, 0xd7, 0xf6, 0x80, 0x83, 0x7d, 0xd2, 0x7c, 0xa3, 0x64, 0x16, 0xe7, 0x2e, 0x4a, 0x9c, 0x7f, + 0xc6, 0x3a, 0x7a, 0x52, 0x92, 0x6f, 0x92, 0xec, 0x06, 0xb9, 0xad, 0x8b, 0x5f, 0x83, 0x0f, 0x99, + 0x67, 0x8b, 0xf6, 0x99, 0x2b, 0xc6, 0x27, 0xbd, 0x1b, 0xbc, 0xc3, 0xbc, 0x6f, 0xc5, 0x78, 0xfc, + 0xa4, 0xd7, 0xe0, 0x6d, 0xd6, 0x3c, 0xfe, 0xf1, 0x7c, 0xdc, 0x73, 0x06, 0x7f, 0x39, 0xac, 0x79, + 0x1a, 0x2e, 0x32, 0xfe, 0x15, 0xeb, 0x26, 0xb6, 0x5d, 0xd0, 0x7b, 0xea, 0xb1, 0xee, 0xe8, 0xed, + 0x52, 0x1f, 0x29, 0xc3, 0x53, 0xea, 0x1f, 0x38, 0x8a, 0x71, 0x6a, 0xf4, 0x52, 0x74, 0x92, 0x32, + 0xe6, 0x47, 0x6c, 0x37, 0xa1, 0xde, 0x2c, 0x77, 0xed, 0x50, 0xfa, 0x3b, 0xd7, 0xd3, 0xb1, 0x5f, + 0xed, 0xb6, 0xad, 0x40, 0x37, 0xa9, 0x57, 0xfa, 0x5f, 0xb3, 0xbd, 0xeb, 0xfa, 0xbc, 0xc7, 0xdc, + 0x5f, 0xe4, 0x92, 0x8e, 0xd1, 0x15, 0xf8, 0x93, 0xef, 0x33, 0xef, 0xd7, 0x30, 0xce, 0x25, 0x8d, + 0x84, 0x8e, 0xb0, 0xc1, 0x23, 0xe7, 0xcb, 0x46, 0xff, 0x09, 0xeb, 0xad, 0xcb, 0xaf, 0xe6, 0xb7, + 0x6d, 0xfe, 0x07, 0xab, 0xf9, 0x9b, 0x87, 0x52, 0xeb, 0x0d, 0xfe, 0x6c, 0xb0, 0x9d, 0xd3, 0x6c, + 0xfe, 0x2c, 0x32, 0xcf, 0x7f, 0x4a, 0xa5, 0xba, 0xe4, 0x6f, 0x31, 0xcf, 0x44, 0x06, 0x76, 0x86, + 0x72, 0x9d, 0xef, 0x6e, 0x08, 0x1b, 0xf2, 0x00, 0x5a, 0x22, 0x8c, 0x43, 0xbd, 0x24, 0x4d, 0x17, + 0x80, 0x22, 0xe6, 0x7d, 0xe6, 0x3f, 0x56, 0x39, 0x56, 0x42, 0x83, 0x0a, 0x73, 0xfc, 0x0b, 0xbb, + 0xc0, 0xdf, 0x67, 0x3b, 0xcf, 0x55, 0x22, 0x27, 0xe1, 0x6c, 0xa6, 0x65, 0x96, 0xd1, 0xbc, 0x42, + 0x42, 0x17, 0x57, 0x8f, 0xec, 0xe2, 0xb1, 0xcf, 0xbc, 0x3c, 0x85, 0x91, 0x3c, 0xb8, 0xc3, 0x9a, + 0x42, 0x86, 0x71, 0xbd, 0xfd, 0x06, 0x4d, 0x16, 0x1b, 0xdc, 0x6d, 0xb7, 0x67, 0xbd, 0x3f, 0xe0, + 0xcf, 0x19, 0x5c, 0xe1, 0x17, 0x71, 0x27, 0x2f, 0xf9, 0x21, 0xeb, 0x44, 0x49, 0x38, 0x8f, 0x52, + 0xac, 0xcc, 0xd2, 0xeb, 0x85, 0x3a, 0x65, 0x74, 0xc2, 0xf6, 0x34, 0x48, 0x4f, 0xe4, 0x4b, 0x23, + 0xd3, 0x0c, 0x3e, 
0xc6, 0x77, 0xea, 0x96, 0x0a, 0xe3, 0xe0, 0xb7, 0xeb, 0x3d, 0x59, 0xc8, 0x8b, + 0x5d, 0x4c, 0x1a, 0x97, 0x39, 0x83, 0xff, 0x9a, 0x8c, 0xfd, 0x90, 0xaa, 0xab, 0xf4, 0xe9, 0x72, + 0x21, 0x33, 0xb0, 0xd9, 0x09, 0xd3, 0x60, 0x8f, 0x52, 0xf7, 0x87, 0xf6, 0x29, 0x18, 0x96, 0x4f, + 0xc1, 0xf0, 0x28, 0x5d, 0x0a, 0xc0, 0xf9, 0xa7, 0xcc, 0x85, 0x47, 0x87, 0x8a, 0xeb, 0x8e, 0x0e, + 0x36, 0x68, 0x27, 0xc5, 0x83, 0x24, 0x90, 0xc5, 0x3f, 0x62, 0x4e, 0x66, 0x82, 0x1d, 0xe2, 0xde, + 0xda, 0xe0, 0x9e, 0xd1, 0xe3, 0x24, 0x80, 0x02, 0xb7, 0xdf, 0x81, 0xdb, 0x61, 0xcf, 0xb7, 0xbf, + 0x41, 0x7c, 0x5a, 0xbe, 0x53, 0x02, 0x58, 0x7c, 0x08, 0x15, 0x4c, 0x63, 0x3a, 0x9d, 0xee, 0xe8, + 0x70, 0xb3, 0x02, 0x1a, 0x47, 0x3f, 0xa3, 0xc9, 0x02, 0x89, 0x30, 0x2d, 0xdc, 0xcb, 0xd8, 0xd0, + 0x61, 0xe1, 0xd5, 0x58, 0xe7, 0xd3, 0x60, 0x2b, 0xe8, 0xc0, 0x43, 0x7a, 0x54, 0x3c, 0x38, 0xaf, + 0xa2, 0x53, 0xb3, 0x17, 0x74, 0xe0, 0x61, 0x35, 0x39, 0xd0, 0x5b, 0x5b, 0xaa, 0x39, 0x5f, 0xe5, + 0x03, 0x91, 0xe4, 0x61, 0x16, 0xfb, 0xdb, 0xe5, 0xef, 0x8f, 0x4a, 0x79, 0x18, 0xd2, 0x28, 0x0f, + 0xf4, 0xf6, 0x6b, 0xe4, 0x2b, 0x7e, 0x4e, 0xfc, 0x26, 0x3d, 0x36, 0x9d, 0x2d, 0x56, 0xe2, 0x6d, + 0xb3, 0x74, 0xe2, 0xa1, 0x3e, 0xce, 0x0d, 0xb6, 0x45, 0xdf, 0x0e, 0xf0, 0x42, 0x1f, 0x88, 0xfc, + 0x0b, 0xe6, 0xd5, 0x2f, 0xde, 0xab, 0x36, 0x40, 0x83, 0xdd, 0x26, 0x58, 0xe6, 0xa3, 0xdb, 0xac, + 0x99, 0x86, 0x89, 0x5c, 0x6b, 0xd1, 0xdf, 0x69, 0x16, 0x10, 0xf2, 0x7f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xda, 0x8b, 0x4a, 0x7a, 0x0e, 0x09, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto new file mode 100644 index 00000000000..d1934a0742e --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto @@ -0,0 +1,135 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +package jsonpb; + +// Test message for holding primitive types. +message Simple { + optional bool o_bool = 1; + optional int32 o_int32 = 2; + optional int64 o_int64 = 3; + optional uint32 o_uint32 = 4; + optional uint64 o_uint64 = 5; + optional sint32 o_sint32 = 6; + optional sint64 o_sint64 = 7; + optional float o_float = 8; + optional double o_double = 9; + optional string o_string = 10; + optional bytes o_bytes = 11; +} + +// Test message for holding repeated primitives. +message Repeats { + repeated bool r_bool = 1; + repeated int32 r_int32 = 2; + repeated int64 r_int64 = 3; + repeated uint32 r_uint32 = 4; + repeated uint64 r_uint64 = 5; + repeated sint32 r_sint32 = 6; + repeated sint64 r_sint64 = 7; + repeated float r_float = 8; + repeated double r_double = 9; + repeated string r_string = 10; + repeated bytes r_bytes = 11; +} + +// Test message for holding enums and nested messages. +message Widget { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color color = 1; + repeated Color r_color = 2; + + optional Simple simple = 10; + repeated Simple r_simple = 11; + + optional Repeats repeats = 20; + repeated Repeats r_repeats = 21; +} + +message Maps { + map m_int64_str = 1; + map m_bool_simple = 2; +} + +message MsgWithOneof { + oneof union { + string title = 1; + int64 salary = 2; + string Country = 3; + string home_address = 4; + } +} + +message Real { + optional double value = 1; + extensions 100 to max; +} + +extend Real { + optional string name = 124; +} + +message Complex { + extend Real { + optional Complex real_extension = 123; + } + optional double imaginary = 1; + extensions 100 to max; +} + +message KnownTypes { + optional google.protobuf.Any an = 14; + optional google.protobuf.Duration dur = 1; + optional google.protobuf.Struct st = 12; + optional google.protobuf.Timestamp ts = 2; + + optional google.protobuf.DoubleValue dbl = 3; + optional google.protobuf.FloatValue flt = 4; + optional google.protobuf.Int64Value i64 = 5; + optional google.protobuf.UInt64Value u64 = 6; + optional google.protobuf.Int32Value i32 = 7; + optional google.protobuf.UInt32Value u32 = 8; + optional google.protobuf.BoolValue bool = 9; + optional google.protobuf.StringValue str = 10; + optional google.protobuf.BytesValue bytes = 11; +} diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile index f1f06564a15..e2e0651a934 100644 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ b/vendor/github.com/golang/protobuf/proto/Makefile @@ -39,5 +39,5 @@ test: install generate-test-pbs generate-test-pbs: make install make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto make diff --git a/vendor/github.com/golang/protobuf/proto/all_test.go b/vendor/github.com/golang/protobuf/proto/all_test.go new file mode 100644 index 00000000000..53f0ef782c1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/all_test.go @@ -0,0 +1,2269 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "reflect" + "runtime/debug" + "strings" + "testing" + "time" + + . "github.com/golang/protobuf/proto" + . "github.com/golang/protobuf/proto/testdata" +) + +var globalO *Buffer + +func old() *Buffer { + if globalO == nil { + globalO = NewBuffer(nil) + } + globalO.Reset() + return globalO +} + +func equalbytes(b1, b2 []byte, t *testing.T) { + if len(b1) != len(b2) { + t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) + return + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) + } + } +} + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. +// (It's remarkable that required, optional, and repeated all have +// 8 letters.) 
+func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = initGoTest_RequiredGroup() + + return pb +} + +func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { + data := b.Bytes() + ld := len(data) + ls := len(s) / 2 + + fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) + + // find the interesting spot - n + n := ls + if ld < ls { + n = ld + } + j := 0 + for i := 0; i < n; i++ { + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + if data[i] == bs { + continue + } + n = i + break + } + l := n - 10 + if l < 0 { + l = 0 + } + h := n + 10 + + // find the interesting spot - n + fmt.Printf("is[%d]:", l) + for i := l; i < h; i++ { + if i >= ld { + fmt.Printf(" --") + continue + } + fmt.Printf(" %.2x", data[i]) + } + fmt.Printf("\n") + + fmt.Printf("sb[%d]:", l) + for i := l; i < h; i++ { + if i >= ls { + fmt.Printf(" --") + continue + } + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + fmt.Printf(" %.2x", bs) + } + fmt.Printf("\n") + + t.Fail() + + // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) + // Print the output in a partially-decoded format; can + // be helpful when updating the test. It produces the output + // that is pasted, with minor edits, into the argument to verify(). 
+ // data := b.Bytes() + // nesting := 0 + // for b.Len() > 0 { + // start := len(data) - b.Len() + // var u uint64 + // u, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // wire := u & 0x7 + // tag := u >> 3 + // switch wire { + // case WireVarint: + // v, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed32: + // v, err := DecodeFixed32(b) + // if err != nil { + // fmt.Printf("decode error on fixed32:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed64: + // v, err := DecodeFixed64(b) + // if err != nil { + // fmt.Printf("decode error on fixed64:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireBytes: + // nb, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // after_tag := len(data) - b.Len() + // str := make([]byte, nb) + // _, err = b.Read(str) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", + // data[start:after_tag], str, tag, wire) + // case WireStartGroup: + // nesting++ + // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // case WireEndGroup: + // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // nesting-- + // default: + // fmt.Printf("unrecognized wire type %d\n", wire) + // return + // } + // } +} + +func hex(c uint8) uint8 { + if '0' <= c && c <= '9' { + return c - '0' + } + if 'a' <= c && c <= 'f' { + return 10 + c - 'a' + } + if 'A' <= c && c <= 'F' { + return 10 + c - 'A' + } + return 0 +} + +func equal(b []byte, s string, t *testing.T) bool { + if 2*len(b) != len(s) { + // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) + fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) + return false + } + for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { + x := hex(s[j])*16 + hex(s[j+1]) + if b[i] != x { + // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) + fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) + return false + } + } + return true +} + +func overify(t *testing.T, pb *GoTest, expected string) { + o := old() + err := o.Marshal(pb) + if err != nil { + fmt.Printf("overify marshal-1 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 1", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. 
+ pbd := new(GoTest) + err = o.Unmarshal(pbd) + if err != nil { + t.Fatalf("overify unmarshal err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + o.Reset() + err = o.Marshal(pbd) + if err != nil { + t.Errorf("overify marshal-2 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 2", o.Bytes()) + t.Fatalf("string = %s", expected) + } +} + +// Simple tests for numeric encode/decode primitives (varint, etc.) +func TestNumericPrimitives(t *testing.T) { + for i := uint64(0); i < 1e6; i += 111 { + o := old() + if o.EncodeVarint(i) != nil { + t.Error("EncodeVarint") + break + } + x, e := o.DecodeVarint() + if e != nil { + t.Fatal("DecodeVarint") + } + if x != i { + t.Fatal("varint decode fail:", i, x) + } + + o = old() + if o.EncodeFixed32(i) != nil { + t.Fatal("encFixed32") + } + x, e = o.DecodeFixed32() + if e != nil { + t.Fatal("decFixed32") + } + if x != i { + t.Fatal("fixed32 decode fail:", i, x) + } + + o = old() + if o.EncodeFixed64(i*1234567) != nil { + t.Error("encFixed64") + break + } + x, e = o.DecodeFixed64() + if e != nil { + t.Error("decFixed64") + break + } + if x != i*1234567 { + t.Error("fixed64 decode fail:", i*1234567, x) + break + } + + o = old() + i32 := int32(i - 12345) + if o.EncodeZigzag32(uint64(i32)) != nil { + t.Fatal("EncodeZigzag32") + } + x, e = o.DecodeZigzag32() + if e != nil { + t.Fatal("DecodeZigzag32") + } + if x != uint64(uint32(i32)) { + t.Fatal("zigzag32 decode fail:", i32, x) + } + + o = old() + i64 := int64(i - 12345) + if o.EncodeZigzag64(uint64(i64)) != nil { + t.Fatal("EncodeZigzag64") + } + x, e = o.DecodeZigzag64() + if e != nil { + t.Fatal("DecodeZigzag64") + } + if x != uint64(i64) { + t.Fatal("zigzag64 decode fail:", i64, x) + } + } +} + +// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. +type fakeMarshaler struct { + b []byte + err error +} + +func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } +func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } +func (f *fakeMarshaler) ProtoMessage() {} +func (f *fakeMarshaler) Reset() {} + +type msgWithFakeMarshaler struct { + M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` +} + +func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } +func (m *msgWithFakeMarshaler) ProtoMessage() {} +func (m *msgWithFakeMarshaler) Reset() {} + +// Simple tests for proto messages that implement the Marshaler interface. +func TestMarshalerEncoding(t *testing.T) { + tests := []struct { + name string + m Message + want []byte + wantErr error + }{ + { + name: "Marshaler that fails", + m: &fakeMarshaler{ + err: errors.New("some marshal err"), + b: []byte{5, 6, 7}, + }, + // Since there's an error, nothing should be written to buffer. + want: nil, + wantErr: errors.New("some marshal err"), + }, + { + name: "Marshaler that fails with RequiredNotSetError", + m: &msgWithFakeMarshaler{ + M: &fakeMarshaler{ + err: &RequiredNotSetError{}, + b: []byte{5, 6, 7}, + }, + }, + // Since there's an error that can be continued after, + // the buffer should be written. 
+ want: []byte{ + 10, 3, // for &msgWithFakeMarshaler + 5, 6, 7, // for &fakeMarshaler + }, + wantErr: &RequiredNotSetError{}, + }, + { + name: "Marshaler that succeeds", + m: &fakeMarshaler{ + b: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + want: []byte{0, 1, 2, 3, 4, 127, 255}, + wantErr: nil, + }, + } + for _, test := range tests { + b := NewBuffer(nil) + err := b.Marshal(test.m) + if _, ok := err.(*RequiredNotSetError); ok { + // We're not in package proto, so we can only assert the type in this case. + err = &RequiredNotSetError{} + } + if !reflect.DeepEqual(test.wantErr, err) { + t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) + } + if !reflect.DeepEqual(test.want, b.Bytes()) { + t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) + } + } +} + +// Simple tests for bytes +func TestBytesPrimitives(t *testing.T) { + o := old() + bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} + if o.EncodeRawBytes(bytes) != nil { + t.Error("EncodeRawBytes") + } + decb, e := o.DecodeRawBytes(false) + if e != nil { + t.Error("DecodeRawBytes") + } + equalbytes(bytes, decb, t) +} + +// Simple tests for strings +func TestStringPrimitives(t *testing.T) { + o := old() + s := "now is the time" + if o.EncodeStringBytes(s) != nil { + t.Error("enc_string") + } + decs, e := o.DecodeStringBytes() + if e != nil { + t.Error("dec_string") + } + if s != decs { + t.Error("string encode/decode fail:", s, decs) + } +} + +// Do we catch the "required bit not set" case? +func TestRequiredBit(t *testing.T) { + o := old() + pb := new(GoTest) + err := o.Marshal(pb) + if err == nil { + t.Error("did not catch missing required fields") + } else if strings.Index(err.Error(), "Kind") < 0 { + t.Error("wrong error type:", err) + } +} + +// Check that all fields are nil. +// Clearly silly, and a residue from a more interesting test with an earlier, +// different initialization property, but it once caught a compiler bug so +// it lives. +func checkInitialized(pb *GoTest, t *testing.T) { + if pb.F_BoolDefaulted != nil { + t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) + } + if pb.F_Int32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) + } + if pb.F_Int64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) + } + if pb.F_Fixed32Defaulted != nil { + t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) + } + if pb.F_Fixed64Defaulted != nil { + t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) + } + if pb.F_Uint32Defaulted != nil { + t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) + } + if pb.F_Uint64Defaulted != nil { + t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) + } + if pb.F_FloatDefaulted != nil { + t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) + } + if pb.F_DoubleDefaulted != nil { + t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) + } + if pb.F_StringDefaulted != nil { + t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) + } + if pb.F_BytesDefaulted != nil { + t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) + } + if pb.F_Sint32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) + } + if pb.F_Sint64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) + } +} + +// Does Reset() reset? 
+func TestReset(t *testing.T) { + pb := initGoTest(true) + // muck with some values + pb.F_BoolDefaulted = Bool(false) + pb.F_Int32Defaulted = Int32(237) + pb.F_Int64Defaulted = Int64(12346) + pb.F_Fixed32Defaulted = Uint32(32000) + pb.F_Fixed64Defaulted = Uint64(666) + pb.F_Uint32Defaulted = Uint32(323232) + pb.F_Uint64Defaulted = nil + pb.F_FloatDefaulted = nil + pb.F_DoubleDefaulted = Float64(0) + pb.F_StringDefaulted = String("gotcha") + pb.F_BytesDefaulted = []byte("asdfasdf") + pb.F_Sint32Defaulted = Int32(123) + pb.F_Sint64Defaulted = Int64(789) + pb.Reset() + checkInitialized(pb, t) +} + +// All required fields set, no defaults provided. +func TestEncodeDecode1(t *testing.T) { + pb := initGoTest(false) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 0x20 + "714000000000000000"+ // field 14, encoding 1, value 0x40 + "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 + "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" + "b304"+ // field 70, encoding 3, start group + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // field 70, encoding 4, end group + "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f") // field 103, encoding 0, 0x7f zigzag64 +} + +// All required fields set, defaults provided. 
+func TestEncodeDecode2(t *testing.T) { + pb := initGoTest(true) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All default fields set to their default value by hand +func TestEncodeDecode3(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolDefaulted = Bool(true) + pb.F_Int32Defaulted = Int32(32) + pb.F_Int64Defaulted = Int64(64) + pb.F_Fixed32Defaulted = Uint32(320) + pb.F_Fixed64Defaulted = Uint64(640) + pb.F_Uint32Defaulted = Uint32(3200) + pb.F_Uint64Defaulted = Uint64(6400) + pb.F_FloatDefaulted = Float32(314159) + pb.F_DoubleDefaulted = Float64(271828) + pb.F_StringDefaulted = String("hello, \"world!\"\n") + pb.F_BytesDefaulted = []byte("Bignose") + pb.F_Sint32Defaulted = Int32(-32) + pb.F_Sint64Defaulted = Int64(-64) + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all non-defaulted optional fields have values. +func TestEncodeDecode4(t *testing.T) { + pb := initGoTest(true) + pb.Table = String("hello") + pb.Param = Int32(7) + pb.OptionalField = initGoTestField() + pb.F_BoolOptional = Bool(true) + pb.F_Int32Optional = Int32(32) + pb.F_Int64Optional = Int64(64) + pb.F_Fixed32Optional = Uint32(3232) + pb.F_Fixed64Optional = Uint64(6464) + pb.F_Uint32Optional = Uint32(323232) + pb.F_Uint64Optional = Uint64(646464) + pb.F_FloatOptional = Float32(32.) + pb.F_DoubleOptional = Float64(64.) + pb.F_StringOptional = String("hello") + pb.F_BytesOptional = []byte("Bignose") + pb.F_Sint32Optional = Int32(-32) + pb.F_Sint64Optional = Int64(-64) + pb.Optionalgroup = initGoTest_OptionalGroup() + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" + "1807"+ // field 3, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "f00101"+ // field 30, encoding 0, value 1 + "f80120"+ // field 31, encoding 0, value 32 + "800240"+ // field 32, encoding 0, value 64 + "8d02a00c0000"+ // field 33, encoding 5, value 3232 + "91024019000000000000"+ // field 34, encoding 1, value 6464 + "9802a0dd13"+ // field 35, encoding 0, value 323232 + "a002c0ba27"+ // field 36, encoding 0, value 646464 + "ad0200000042"+ // field 37, encoding 5, value 32.0 + "b1020000000000005040"+ // field 38, encoding 1, value 64.0 + "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "d305"+ // start group field 90 level 1 + 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" + "d405"+ // end group field 90 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" + "f0123f"+ // field 302, encoding 0, value 63 + "f8127f"+ // field 303, encoding 0, value 127 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestEncodeDecode5(t *testing.T) { + pb := initGoTest(true) + pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} + pb.F_BoolRepeated = []bool{false, true} + pb.F_Int32Repeated = []int32{32, 33} + pb.F_Int64Repeated = []int64{64, 65} + pb.F_Fixed32Repeated = []uint32{3232, 3333} + pb.F_Fixed64Repeated = []uint64{6464, 6565} + pb.F_Uint32Repeated = []uint32{323232, 333333} + pb.F_Uint64Repeated = []uint64{646464, 656565} + pb.F_FloatRepeated = []float32{32., 33.} + pb.F_DoubleRepeated = []float64{64., 65.} + pb.F_StringRepeated = []string{"hello", "sailor"} + pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} + pb.F_Sint32Repeated = []int32{32, -32} + pb.F_Sint64Repeated = []int64{64, -64} + pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "a00100"+ // field 20, encoding 0, value 0 + "a00101"+ // field 20, encoding 0, value 1 + "a80120"+ // field 21, encoding 0, value 32 + "a80121"+ // field 21, encoding 0, value 33 + "b00140"+ // field 22, encoding 0, value 64 + "b00141"+ // field 22, encoding 0, value 65 + "bd01a00c0000"+ // field 23, encoding 5, value 3232 + "bd01050d0000"+ // field 23, encoding 5, value 3333 + "c1014019000000000000"+ // field 24, encoding 1, value 6464 + "c101a519000000000000"+ // field 24, encoding 1, value 6565 + "c801a0dd13"+ // field 25, encoding 0, value 323232 + "c80195ac14"+ // field 25, encoding 0, value 333333 + "d001c0ba27"+ // field 26, encoding 0, value 646464 + "d001b58928"+ // field 26, encoding 0, value 656565 + "dd0100000042"+ // field 27, encoding 5, value 32.0 + "dd0100000442"+ // field 27, encoding 5, value 33.0 + "e1010000000000005040"+ // field 28, encoding 1, value 64.0 + "e1010000000000405040"+ // field 28, encoding 1, value 65.0 + "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" + "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 
42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ca0c03"+"626967"+ // field 201, encoding 2, string "big" + "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" + "d00c40"+ // field 202, encoding 0, value 32 + "d00c3f"+ // field 202, encoding 0, value -32 + "d80c8001"+ // field 203, encoding 0, value 64 + "d80c7f"+ // field 203, encoding 0, value -64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, all packed repeated fields given two values. +func TestEncodeDecode6(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolRepeatedPacked = []bool{false, true} + pb.F_Int32RepeatedPacked = []int32{32, 33} + pb.F_Int64RepeatedPacked = []int64{64, 65} + pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} + pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} + pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} + pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} + pb.F_FloatRepeatedPacked = []float32{32., 33.} + pb.F_DoubleRepeatedPacked = []float64{64., 65.} + pb.F_Sint32RepeatedPacked = []int32{32, -32} + pb.F_Sint64RepeatedPacked = []int64{64, -64} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 + "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 + "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 + "aa0308"+ // field 53, encoding 2, 8 bytes + "a00c0000050d0000"+ // value 3232, value 3333 + "b20310"+ // field 54, encoding 2, 16 bytes + "4019000000000000a519000000000000"+ // value 6464, value 6565 + "ba0306"+ // field 55, encoding 2, 6 bytes + "a0dd1395ac14"+ // value 323232, value 333333 + "c20306"+ // field 56, encoding 2, 6 bytes + "c0ba27b58928"+ // value 646464, value 656565 + "ca0308"+ // field 57, 
encoding 2, 8 bytes + "0000004200000442"+ // value 32.0, value 33.0 + "d20310"+ // field 58, encoding 2, 16 bytes + "00000000000050400000000000405040"+ // value 64.0, value 65.0 + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "b21f02"+ // field 502, encoding 2, 2 bytes + "403f"+ // value 32, value -32 + "ba1f03"+ // field 503, encoding 2, 3 bytes + "80017f") // value 64, value -64 +} + +// Test that we can encode empty bytes fields. +func TestEncodeDecodeBytes1(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRequired = []byte{} + pb.F_BytesRepeated = [][]byte{{}} + pb.F_BytesOptional = []byte{} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { + t.Error("required empty bytes field is incorrect") + } + if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { + t.Error("repeated empty bytes field is incorrect") + } + if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { + t.Error("optional empty bytes field is incorrect") + } +} + +// Test that we encode nil-valued fields of a repeated bytes field correctly. +// Since entries in a repeated field cannot be nil, nil must mean empty value. +func TestEncodeDecodeBytes2(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRepeated = [][]byte{nil} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { + t.Error("Unexpected value for repeated bytes field") + } +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestSkippingUnrecognizedFields(t *testing.T) { + o := old() + pb := initGoTestField() + + // Marshal it normally. + o.Marshal(pb) + + // Now new a GoSkipTest record. + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + // Marshal it into same buffer. + o.Marshal(skip) + + pbd := new(GoTestField) + o.Unmarshal(pbd) + + // The __unrecognized field should be a marshaling of GoSkipTest + skipd := new(GoSkipTest) + + o.SetBuf(pbd.XXX_unrecognized) + o.Unmarshal(skipd) + + if *skipd.SkipInt32 != *skip.SkipInt32 { + t.Error("skip int32", skipd.SkipInt32) + } + if *skipd.SkipFixed32 != *skip.SkipFixed32 { + t.Error("skip fixed32", skipd.SkipFixed32) + } + if *skipd.SkipFixed64 != *skip.SkipFixed64 { + t.Error("skip fixed64", skipd.SkipFixed64) + } + if *skipd.SkipString != *skip.SkipString { + t.Error("skip string", *skipd.SkipString) + } + if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { + t.Error("skip group int32", skipd.Skipgroup.GroupInt32) + } + if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { + t.Error("skip group string", *skipd.Skipgroup.GroupString) + } +} + +// Check that unrecognized fields of a submessage are preserved. 
+func TestSubmessageUnrecognizedFields(t *testing.T) { + nm := &NewMessage{ + Nested: &NewMessage_Nested{ + Name: String("Nigel"), + FoodGroup: String("carbs"), + }, + } + b, err := Marshal(nm) + if err != nil { + t.Fatalf("Marshal of NewMessage: %v", err) + } + + // Unmarshal into an OldMessage. + om := new(OldMessage) + if err := Unmarshal(b, om); err != nil { + t.Fatalf("Unmarshal to OldMessage: %v", err) + } + exp := &OldMessage{ + Nested: &OldMessage_Nested{ + Name: String("Nigel"), + // normal protocol buffer users should not do this + XXX_unrecognized: []byte("\x12\x05carbs"), + }, + } + if !Equal(om, exp) { + t.Errorf("om = %v, want %v", om, exp) + } + + // Clone the OldMessage. + om = Clone(om).(*OldMessage) + if !Equal(om, exp) { + t.Errorf("Clone(om) = %v, want %v", om, exp) + } + + // Marshal the OldMessage, then unmarshal it into an empty NewMessage. + if b, err = Marshal(om); err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + t.Logf("Marshal(%v) -> %q", om, b) + nm2 := new(NewMessage) + if err := Unmarshal(b, nm2); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + if !Equal(nm, nm2) { + t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) + } +} + +// Check that an int32 field can be upgraded to an int64 field. +func TestNegativeInt32(t *testing.T) { + om := &OldMessage{ + Num: Int32(-1), + } + b, err := Marshal(om) + if err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + + // Check the size. It should be 11 bytes; + // 1 for the field/wire type, and 10 for the negative number. + if len(b) != 11 { + t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) + } + + // Unmarshal into a NewMessage. + nm := new(NewMessage) + if err := Unmarshal(b, nm); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + want := &NewMessage{ + Num: Int64(-1), + } + if !Equal(nm, want) { + t.Errorf("nm = %v, want %v", nm, want) + } +} + +// Check that we can grow an array (repeated field) to have many elements. +// This test doesn't depend only on our encoding; for variety, it makes sure +// we create, encode, and decode the correct contents explicitly. It's therefore +// a bit messier. +// This test also uses (and hence tests) the Marshal/Unmarshal functions +// instead of the methods. +func TestBigRepeated(t *testing.T) { + pb := initGoTest(true) + + // Create the arrays + const N = 50 // Internally the library starts much smaller. + pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) + pb.F_Sint64Repeated = make([]int64, N) + pb.F_Sint32Repeated = make([]int32, N) + pb.F_BytesRepeated = make([][]byte, N) + pb.F_StringRepeated = make([]string, N) + pb.F_DoubleRepeated = make([]float64, N) + pb.F_FloatRepeated = make([]float32, N) + pb.F_Uint64Repeated = make([]uint64, N) + pb.F_Uint32Repeated = make([]uint32, N) + pb.F_Fixed64Repeated = make([]uint64, N) + pb.F_Fixed32Repeated = make([]uint32, N) + pb.F_Int64Repeated = make([]int64, N) + pb.F_Int32Repeated = make([]int32, N) + pb.F_BoolRepeated = make([]bool, N) + pb.RepeatedField = make([]*GoTestField, N) + + // Fill in the arrays with checkable values. 
+ igtf := initGoTestField() + igtrg := initGoTest_RepeatedGroup() + for i := 0; i < N; i++ { + pb.Repeatedgroup[i] = igtrg + pb.F_Sint64Repeated[i] = int64(i) + pb.F_Sint32Repeated[i] = int32(i) + s := fmt.Sprint(i) + pb.F_BytesRepeated[i] = []byte(s) + pb.F_StringRepeated[i] = s + pb.F_DoubleRepeated[i] = float64(i) + pb.F_FloatRepeated[i] = float32(i) + pb.F_Uint64Repeated[i] = uint64(i) + pb.F_Uint32Repeated[i] = uint32(i) + pb.F_Fixed64Repeated[i] = uint64(i) + pb.F_Fixed32Repeated[i] = uint32(i) + pb.F_Int64Repeated[i] = int64(i) + pb.F_Int32Repeated[i] = int32(i) + pb.F_BoolRepeated[i] = i%2 == 0 + pb.RepeatedField[i] = igtf + } + + // Marshal. + buf, _ := Marshal(pb) + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + Unmarshal(buf, pbd) + + // Check the checkable values + for i := uint64(0); i < N; i++ { + if pbd.Repeatedgroup[i] == nil { // TODO: more checking? + t.Error("pbd.Repeatedgroup bad") + } + var x uint64 + x = uint64(pbd.F_Sint64Repeated[i]) + if x != i { + t.Error("pbd.F_Sint64Repeated bad", x, i) + } + x = uint64(pbd.F_Sint32Repeated[i]) + if x != i { + t.Error("pbd.F_Sint32Repeated bad", x, i) + } + s := fmt.Sprint(i) + equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) + if pbd.F_StringRepeated[i] != s { + t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) + } + x = uint64(pbd.F_DoubleRepeated[i]) + if x != i { + t.Error("pbd.F_DoubleRepeated bad", x, i) + } + x = uint64(pbd.F_FloatRepeated[i]) + if x != i { + t.Error("pbd.F_FloatRepeated bad", x, i) + } + x = pbd.F_Uint64Repeated[i] + if x != i { + t.Error("pbd.F_Uint64Repeated bad", x, i) + } + x = uint64(pbd.F_Uint32Repeated[i]) + if x != i { + t.Error("pbd.F_Uint32Repeated bad", x, i) + } + x = pbd.F_Fixed64Repeated[i] + if x != i { + t.Error("pbd.F_Fixed64Repeated bad", x, i) + } + x = uint64(pbd.F_Fixed32Repeated[i]) + if x != i { + t.Error("pbd.F_Fixed32Repeated bad", x, i) + } + x = uint64(pbd.F_Int64Repeated[i]) + if x != i { + t.Error("pbd.F_Int64Repeated bad", x, i) + } + x = uint64(pbd.F_Int32Repeated[i]) + if x != i { + t.Error("pbd.F_Int32Repeated bad", x, i) + } + if pbd.F_BoolRepeated[i] != (i%2 == 0) { + t.Error("pbd.F_BoolRepeated bad", x, i) + } + if pbd.RepeatedField[i] == nil { // TODO: more checking? + t.Error("pbd.RepeatedField bad") + } + } +} + +// Verify we give a useful message when decoding to the wrong structure type. +func TestTypeMismatch(t *testing.T) { + pb1 := initGoTest(true) + + // Marshal + o := old() + o.Marshal(pb1) + + // Now Unmarshal it to the wrong type. 
+ pb2 := initGoTestField() + err := o.Unmarshal(pb2) + if err == nil { + t.Error("expected error, got no error") + } else if !strings.Contains(err.Error(), "bad wiretype") { + t.Error("expected bad wiretype error, got", err) + } +} + +func encodeDecode(t *testing.T, in, out Message, msg string) { + buf, err := Marshal(in) + if err != nil { + t.Fatalf("failed marshaling %v: %v", msg, err) + } + if err := Unmarshal(buf, out); err != nil { + t.Fatalf("failed unmarshaling %v: %v", msg, err) + } +} + +func TestPackedNonPackedDecoderSwitching(t *testing.T) { + np, p := new(NonPackedTest), new(PackedTest) + + // non-packed -> packed + np.A = []int32{0, 1, 1, 2, 3, 5} + encodeDecode(t, np, p, "non-packed -> packed") + if !reflect.DeepEqual(np.A, p.B) { + t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) + } + + // packed -> non-packed + np.Reset() + p.B = []int32{3, 1, 4, 1, 5, 9} + encodeDecode(t, p, np, "packed -> non-packed") + if !reflect.DeepEqual(p.B, np.A) { + t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) + } +} + +func TestProto1RepeatedGroup(t *testing.T) { + pb := &MessageList{ + Message: []*MessageList_Message{ + { + Name: String("blah"), + Count: Int32(7), + }, + // NOTE: pb.Message[1] is a nil + nil, + }, + } + + o := old() + err := o.Marshal(pb) + if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { + t.Fatalf("unexpected or no error when marshaling: %v", err) + } +} + +// Test that enums work. Checks for a bug introduced by making enums +// named types instead of int32: newInt32FromUint64 would crash with +// a type mismatch in reflect.PointTo. +func TestEnum(t *testing.T) { + pb := new(GoEnum) + pb.Foo = FOO_FOO1.Enum() + o := old() + if err := o.Marshal(pb); err != nil { + t.Fatal("error encoding enum:", err) + } + pb1 := new(GoEnum) + if err := o.Unmarshal(pb1); err != nil { + t.Fatal("error decoding enum:", err) + } + if *pb1.Foo != FOO_FOO1 { + t.Error("expected 7 but got ", *pb1.Foo) + } +} + +// Enum types have String methods. Check that enum fields can be printed. +// We don't care what the value actually is, just as long as it doesn't crash. +func TestPrintingNilEnumFields(t *testing.T) { + pb := new(GoEnum) + fmt.Sprintf("%+v", pb) +} + +// Verify that absent required fields cause Marshal/Unmarshal to return errors. +func TestRequiredFieldEnforcement(t *testing.T) { + pb := new(GoTestField) + _, err := Marshal(pb) + if err == nil { + t.Error("marshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") { + t.Errorf("marshal: bad error type: %v", err) + } + + // A slightly sneaky, yet valid, proto. It encodes the same required field twice, + // so simply counting the required fields is insufficient. + // field 1, encoding 2, value "hi" + buf := []byte("\x0A\x02hi\x0A\x02hi") + err = Unmarshal(buf, pb) + if err == nil { + t.Error("unmarshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors. 
+func TestRequiredFieldEnforcementGroups(t *testing.T) { + pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}} + if _, err := Marshal(pb); err == nil { + t.Error("marshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") { + t.Errorf("marshal: bad error type: %v", err) + } + + buf := []byte{11, 12} + if err := Unmarshal(buf, pb); err == nil { + t.Error("unmarshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +func TestTypedNilMarshal(t *testing.T) { + // A typed nil should return ErrNil and not crash. + { + var m *GoEnum + if _, err := Marshal(m); err != ErrNil { + t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err) + } + } + + { + m := &Communique{Union: &Communique_Msg{nil}} + if _, err := Marshal(m); err == nil || err == ErrNil { + t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) + } + } +} + +// A type that implements the Marshaler interface, but is not nillable. +type nonNillableInt uint64 + +func (nni nonNillableInt) Marshal() ([]byte, error) { + return EncodeVarint(uint64(nni)), nil +} + +type NNIMessage struct { + nni nonNillableInt +} + +func (*NNIMessage) Reset() {} +func (*NNIMessage) String() string { return "" } +func (*NNIMessage) ProtoMessage() {} + +// A type that implements the Marshaler interface and is nillable. +type nillableMessage struct { + x uint64 +} + +func (nm *nillableMessage) Marshal() ([]byte, error) { + return EncodeVarint(nm.x), nil +} + +type NMMessage struct { + nm *nillableMessage +} + +func (*NMMessage) Reset() {} +func (*NMMessage) String() string { return "" } +func (*NMMessage) ProtoMessage() {} + +// Verify a type that uses the Marshaler interface, but has a nil pointer. +func TestNilMarshaler(t *testing.T) { + // Try a struct with a Marshaler field that is nil. + // It should be directly marshable. + nmm := new(NMMessage) + if _, err := Marshal(nmm); err != nil { + t.Error("unexpected error marshaling nmm: ", err) + } + + // Try a struct with a Marshaler field that is not nillable. + nnim := new(NNIMessage) + nnim.nni = 7 + var _ Marshaler = nnim.nni // verify it is truly a Marshaler + if _, err := Marshal(nnim); err != nil { + t.Error("unexpected error marshaling nnim: ", err) + } +} + +func TestAllSetDefaults(t *testing.T) { + // Exercise SetDefaults with all scalar field types. + m := &Defaults{ + // NaN != NaN, so override that here. + F_Nan: Float32(1.7), + } + expected := &Defaults{ + F_Bool: Bool(true), + F_Int32: Int32(32), + F_Int64: Int64(64), + F_Fixed32: Uint32(320), + F_Fixed64: Uint64(640), + F_Uint32: Uint32(3200), + F_Uint64: Uint64(6400), + F_Float: Float32(314159), + F_Double: Float64(271828), + F_String: String(`hello, "world!"` + "\n"), + F_Bytes: []byte("Bignose"), + F_Sint32: Int32(-32), + F_Sint64: Int64(-64), + F_Enum: Defaults_GREEN.Enum(), + F_Pinf: Float32(float32(math.Inf(1))), + F_Ninf: Float32(float32(math.Inf(-1))), + F_Nan: Float32(1.7), + StrZero: String(""), + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithSetField(t *testing.T) { + // Check that a set value is not overridden. 
+ m := &Defaults{ + F_Int32: Int32(12), + } + SetDefaults(m) + if v := m.GetF_Int32(); v != 12 { + t.Errorf("m.FInt32 = %v, want 12", v) + } +} + +func TestSetDefaultsWithSubMessage(t *testing.T) { + m := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + }, + } + expected := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + Port: Int32(4000), + }, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { + m := &MyMessage{ + RepInner: []*InnerMessage{{}}, + } + expected := &MyMessage{ + RepInner: []*InnerMessage{{ + Port: Int32(4000), + }}, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { + m := &MyMessage{ + Pet: []string{"turtle", "wombat"}, + } + expected := Clone(m) + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestMaximumTagNumber(t *testing.T) { + m := &MaxTag{ + LastField: String("natural goat essence"), + } + buf, err := Marshal(m) + if err != nil { + t.Fatalf("proto.Marshal failed: %v", err) + } + m2 := new(MaxTag) + if err := Unmarshal(buf, m2); err != nil { + t.Fatalf("proto.Unmarshal failed: %v", err) + } + if got, want := m2.GetLastField(), *m.LastField; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestJSON(t *testing.T) { + m := &MyMessage{ + Count: Int32(4), + Pet: []string{"bunny", "kitty"}, + Inner: &InnerMessage{ + Host: String("cauchy"), + }, + Bikeshed: MyMessage_GREEN.Enum(), + } + const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` + + b, err := json.Marshal(m) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + s := string(b) + if s != expected { + t.Errorf("got %s\nwant %s", s, expected) + } + + received := new(MyMessage) + if err := json.Unmarshal(b, received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } + + // Test unmarshalling of JSON with symbolic enum name. + const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` + received.Reset() + if err := json.Unmarshal([]byte(old), received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } +} + +func TestBadWireType(t *testing.T) { + b := []byte{7<<3 | 6} // field 7, wire type 6 + pb := new(OtherMessage) + if err := Unmarshal(b, pb); err == nil { + t.Errorf("Unmarshal did not fail") + } else if !strings.Contains(err.Error(), "unknown wire type") { + t.Errorf("wrong error: %v", err) + } +} + +func TestBytesWithInvalidLength(t *testing.T) { + // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. + b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} + Unmarshal(b, new(MyMessage)) +} + +func TestLengthOverflow(t *testing.T) { + // Overflowing a length should not panic. + b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} + Unmarshal(b, new(MyMessage)) +} + +func TestVarintOverflow(t *testing.T) { + // Overflowing a 64-bit length should not be allowed. 
+ b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} + if err := Unmarshal(b, new(MyMessage)); err == nil { + t.Fatalf("Overflowed uint64 length without error") + } +} + +func TestUnmarshalFuzz(t *testing.T) { + const N = 1000 + seed := time.Now().UnixNano() + t.Logf("RNG seed is %d", seed) + rng := rand.New(rand.NewSource(seed)) + buf := make([]byte, 20) + for i := 0; i < N; i++ { + for j := range buf { + buf[j] = byte(rng.Intn(256)) + } + fuzzUnmarshal(t, buf) + } +} + +func TestMergeMessages(t *testing.T) { + pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} + data, err := Marshal(pb) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + pb1 := new(MessageList) + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("first Unmarshal: %v", err) + } + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("second Unmarshal: %v", err) + } + if len(pb1.Message) != 1 { + t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) + } + + pb2 := new(MessageList) + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("first UnmarshalMerge: %v", err) + } + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("second UnmarshalMerge: %v", err) + } + if len(pb2.Message) != 2 { + t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) + } +} + +func TestExtensionMarshalOrder(t *testing.T) { + m := &MyMessage{Count: Int(123)} + if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { + t.Fatalf("SetExtension: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. + var orig []byte + for i := 0; i < 100; i++ { + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if i == 0 { + orig = b + continue + } + if !bytes.Equal(b, orig) { + t.Errorf("Bytes differ on attempt #%d", i) + } + } +} + +// Many extensions, because small maps might not iterate differently on each iteration. +var exts = []*ExtensionDesc{ + E_X201, + E_X202, + E_X203, + E_X204, + E_X205, + E_X206, + E_X207, + E_X208, + E_X209, + E_X210, + E_X211, + E_X212, + E_X213, + E_X214, + E_X215, + E_X216, + E_X217, + E_X218, + E_X219, + E_X220, + E_X221, + E_X222, + E_X223, + E_X224, + E_X225, + E_X226, + E_X227, + E_X228, + E_X229, + E_X230, + E_X231, + E_X232, + E_X233, + E_X234, + E_X235, + E_X236, + E_X237, + E_X238, + E_X239, + E_X240, + E_X241, + E_X242, + E_X243, + E_X244, + E_X245, + E_X246, + E_X247, + E_X248, + E_X249, + E_X250, +} + +func TestMessageSetMarshalOrder(t *testing.T) { + m := &MyMessageSet{} + for _, x := range exts { + if err := SetExtension(m, x, &Empty{}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + } + + buf, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. 
+ for i := 0; i < 10; i++ { + b1, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(b1, buf) { + t.Errorf("Bytes differ on re-Marshal #%d", i) + } + + m2 := &MyMessageSet{} + if err := Unmarshal(buf, m2); err != nil { + t.Errorf("Unmarshal: %v", err) + } + b2, err := Marshal(m2) + if err != nil { + t.Errorf("re-Marshal: %v", err) + } + if !bytes.Equal(b2, buf) { + t.Errorf("Bytes differ on round-trip #%d", i) + } + } +} + +func TestUnmarshalMergesMessages(t *testing.T) { + // If a nested message occurs twice in the input, + // the fields should be merged when decoding. + a := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("polhode"), + Port: Int32(1234), + }, + } + aData, err := Marshal(a) + if err != nil { + t.Fatalf("Marshal(a): %v", err) + } + b := &OtherMessage{ + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Connected: Bool(true), + }, + } + bData, err := Marshal(b) + if err != nil { + t.Fatalf("Marshal(b): %v", err) + } + want := &OtherMessage{ + Key: Int64(123), + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Port: Int32(1234), + Connected: Bool(true), + }, + } + got := new(OtherMessage) + if err := Unmarshal(append(aData, bData...), got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !Equal(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func TestEncodingSizes(t *testing.T) { + tests := []struct { + m Message + n int + }{ + {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, + {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, + {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, + {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, + } + for _, test := range tests { + b, err := Marshal(test.m) + if err != nil { + t.Errorf("Marshal(%v): %v", test.m, err) + continue + } + if len(b) != test.n { + t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) + } + } +} + +func TestRequiredNotSetError(t *testing.T) { + pb := initGoTest(false) + pb.RequiredField.Label = nil + pb.F_Int32Required = nil + pb.F_Int64Required = nil + + expected := "0807" + // field 1, encoding 0, value 7 + "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) + "5001" + // field 10, encoding 0, value 1 + "6d20000000" + // field 13, encoding 5, value 0x20 + "714000000000000000" + // field 14, encoding 1, value 0x40 + "78a019" + // field 15, encoding 0, value 0xca0 = 3232 + "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45" + // field 17, encoding 5, value 3232.0 + "9101000000000040b940" + // field 18, encoding 1, value 6464.0 + "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" + "b304" + // field 70, encoding 3, start group + "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" + "b404" + // field 70, encoding 4, end group + "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" + "b0063f" + // field 102, encoding 0, 0x3f zigzag32 + "b8067f" // field 103, encoding 0, 0x7f zigzag64 + + o := old() + bytes, err := Marshal(pb) + if _, ok := err.(*RequiredNotSetError); !ok { + fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("expected = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-1 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 1", bytes) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by 
recreating the original buffer. + pbd := new(GoTest) + err = Unmarshal(bytes, pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { + t.Errorf("unmarshal wrong err msg: %v", err) + } + bytes, err = Marshal(pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-2 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 2", bytes) + t.Fatalf("string = %s", expected) + } +} + +func fuzzUnmarshal(t *testing.T, data []byte) { + defer func() { + if e := recover(); e != nil { + t.Errorf("These bytes caused a panic: %+v", data) + t.Logf("Stack:\n%s", debug.Stack()) + t.FailNow() + } + }() + + pb := new(MyMessage) + Unmarshal(data, pb) +} + +func TestMapFieldMarshal(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // b should be the concatenation of these three byte sequences in some order. + parts := []string{ + "\n\a\b\x01\x12\x03Rob", + "\n\a\b\x04\x12\x03Ian", + "\n\b\b\x08\x12\x04Dave", + } + ok := false + for i := range parts { + for j := range parts { + if j == i { + continue + } + for k := range parts { + if k == i || k == j { + continue + } + try := parts[i] + parts[j] + parts[k] + if bytes.Equal(b, []byte(try)) { + ok = true + break + } + } + } + } + if !ok { + t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) + } + t.Logf("FYI b: %q", b) + + (new(Buffer)).DebugPrint("Dump of b", b) +} + +func TestMapFieldRoundTrips(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + MsgMapping: map[int64]*FloatingPoint{ + 0x7001: &FloatingPoint{F: Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{ + false: []byte("that's not right!"), + true: []byte("aye, 'tis true!"), + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("FYI b: %q", b) + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + for _, pair := range [][2]interface{}{ + {m.NameMapping, m2.NameMapping}, + {m.MsgMapping, m2.MsgMapping}, + {m.ByteMapping, m2.ByteMapping}, + } { + if !reflect.DeepEqual(pair[0], pair[1]) { + t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) + } + } +} + +func TestMapFieldWithNil(t *testing.T) { + m1 := &MessageWithMap{ + MsgMapping: map[int64]*FloatingPoint{ + 1: nil, + }, + } + b, err := Marshal(m1) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) + } + if v, ok := m2.MsgMapping[1]; !ok { + t.Error("msg_mapping[1] not present") + } else if v != nil { + t.Errorf("msg_mapping[1] not nil: %v", v) + } +} + +func TestMapFieldWithNilBytes(t *testing.T) { + m1 := &MessageWithMap{ + ByteMapping: map[bool][]byte{ + false: []byte{}, + true: nil, + }, + } + n := Size(m1) + b, err := Marshal(m1) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if n != len(b) { 
+ t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b)) + } + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) + } + if v, ok := m2.ByteMapping[false]; !ok { + t.Error("byte_mapping[false] not present") + } else if len(v) != 0 { + t.Errorf("byte_mapping[false] not empty: %#v", v) + } + if v, ok := m2.ByteMapping[true]; !ok { + t.Error("byte_mapping[true] not present") + } else if len(v) != 0 { + t.Errorf("byte_mapping[true] not empty: %#v", v) + } +} + +func TestDecodeMapFieldMissingKey(t *testing.T) { + b := []byte{ + 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes + // no key + 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m" + } + got := &MessageWithMap{} + err := Unmarshal(b, got) + if err != nil { + t.Fatalf("failed to marshal map with missing key: %v", err) + } + want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}} + if !Equal(got, want) { + t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want) + } +} + +func TestDecodeMapFieldMissingValue(t *testing.T) { + b := []byte{ + 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes + 0x08, 0x01, // varint key, value 1 + // no value + } + got := &MessageWithMap{} + err := Unmarshal(b, got) + if err != nil { + t.Fatalf("failed to marshal map with missing value: %v", err) + } + want := &MessageWithMap{NameMapping: map[int32]string{1: ""}} + if !Equal(got, want) { + t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want) + } +} + +func TestOneof(t *testing.T) { + m := &Communique{} + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal of empty message with oneof: %v", err) + } + if len(b) != 0 { + t.Errorf("Marshal of empty message yielded too many bytes: %v", b) + } + + m = &Communique{ + Union: &Communique_Name{"Barry"}, + } + + // Round-trip. + b, err = Marshal(m) + if err != nil { + t.Fatalf("Marshal of message with oneof: %v", err) + } + if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5) + t.Errorf("Incorrect marshal of message with oneof: %v", b) + } + m.Reset() + if err := Unmarshal(b, m); err != nil { + t.Fatalf("Unmarshal of message with oneof: %v", err) + } + if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" { + t.Errorf("After round trip, Union = %+v", m.Union) + } + if name := m.GetName(); name != "Barry" { + t.Errorf("After round trip, GetName = %q, want %q", name, "Barry") + } + + // Let's try with a message in the oneof. + m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}} + b, err = Marshal(m) + if err != nil { + t.Fatalf("Marshal of message with oneof set to message: %v", err) + } + if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16) + t.Errorf("Incorrect marshal of message with oneof set to message: %v", b) + } + m.Reset() + if err := Unmarshal(b, m); err != nil { + t.Fatalf("Unmarshal of message with oneof set to message: %v", err) + } + ss, ok := m.Union.(*Communique_Msg) + if !ok || ss.Msg.GetStringField() != "deep deep string" { + t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union) + } +} + +func TestInefficientPackedBool(t *testing.T) { + // https://github.com/golang/protobuf/issues/76 + inp := []byte{ + 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes + // Usually a bool should take a single byte, + // but it is permitted to be any varint. 
+ 0xb9, 0x30, + } + if err := Unmarshal(inp, new(MoreRepeated)); err != nil { + t.Error(err) + } +} + +// Benchmarks + +func testMsg() *GoTest { + pb := initGoTest(true) + const N = 1000 // Internally the library starts much smaller. + pb.F_Int32Repeated = make([]int32, N) + pb.F_DoubleRepeated = make([]float64, N) + for i := 0; i < N; i++ { + pb.F_Int32Repeated[i] = int32(i) + pb.F_DoubleRepeated[i] = float64(i) + } + return pb +} + +func bytesMsg() *GoTest { + pb := initGoTest(true) + buf := make([]byte, 4000) + for i := range buf { + buf[i] = byte(i) + } + pb.F_BytesDefaulted = buf + return pb +} + +func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { + d, _ := marshal(pb) + b.SetBytes(int64(len(d))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + marshal(pb) + } +} + +func benchmarkBufferMarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + p.Reset() + err := p.Marshal(pb0) + return p.Bytes(), err + }) +} + +func benchmarkSize(b *testing.B, pb Message) { + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + Size(pb) + return nil, nil + }) +} + +func newOf(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + return reflect.New(in.Type().Elem()).Interface().(Message) +} + +func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { + d, _ := Marshal(pb) + b.SetBytes(int64(len(d))) + pbd := newOf(pb) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + unmarshal(d, pbd) + } +} + +func benchmarkBufferUnmarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { + p.SetBuf(d) + return p.Unmarshal(pb0) + }) +} + +// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} + +func BenchmarkMarshal(b *testing.B) { + benchmarkMarshal(b, testMsg(), Marshal) +} + +func BenchmarkBufferMarshal(b *testing.B) { + benchmarkBufferMarshal(b, testMsg()) +} + +func BenchmarkSize(b *testing.B) { + benchmarkSize(b, testMsg()) +} + +func BenchmarkUnmarshal(b *testing.B) { + benchmarkUnmarshal(b, testMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshal(b *testing.B) { + benchmarkBufferUnmarshal(b, testMsg()) +} + +func BenchmarkMarshalBytes(b *testing.B) { + benchmarkMarshal(b, bytesMsg(), Marshal) +} + +func BenchmarkBufferMarshalBytes(b *testing.B) { + benchmarkBufferMarshal(b, bytesMsg()) +} + +func BenchmarkSizeBytes(b *testing.B) { + benchmarkSize(b, bytesMsg()) +} + +func BenchmarkUnmarshalBytes(b *testing.B) { + benchmarkUnmarshal(b, bytesMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshalBytes(b *testing.B) { + benchmarkBufferUnmarshal(b, bytesMsg()) +} + +func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { + b.StopTimer() + pb := initGoTestField() + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + pbd := new(GoTestField) + p := NewBuffer(nil) + p.Marshal(pb) + p.Marshal(skip) + p2 := NewBuffer(nil) + + b.StartTimer() + for i := 0; i < b.N; i++ { + p2.SetBuf(p.Bytes()) + p2.Unmarshal(pbd) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/any_test.go b/vendor/github.com/golang/protobuf/proto/any_test.go new file mode 100644 index 00000000000..1a3c22ed416 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/any_test.go @@ -0,0 +1,300 @@ +// 
Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + pb "github.com/golang/protobuf/proto/proto3_proto" + testpb "github.com/golang/protobuf/proto/testdata" + anypb "github.com/golang/protobuf/ptypes/any" +) + +var ( + expandedMarshaler = proto.TextMarshaler{ExpandAny: true} + expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true} +) + +// anyEqual reports whether two messages which may be google.protobuf.Any or may +// contain google.protobuf.Any fields are equal. We can't use proto.Equal for +// comparison, because semantically equivalent messages may be marshaled to +// binary in different tag order. Instead, trust that TextMarshaler with +// ExpandAny option works and compare the text marshaling results. +func anyEqual(got, want proto.Message) bool { + // if messages are proto.Equal, no need to marshal. 
+ if proto.Equal(got, want) { + return true + } + g := expandedMarshaler.Text(got) + w := expandedMarshaler.Text(want) + return g == w +} + +type golden struct { + m proto.Message + t, c string +} + +var goldenMessages = makeGolden() + +func makeGolden() []golden { + nested := &pb.Nested{Bunny: "Monty"} + nb, err := proto.Marshal(nested) + if err != nil { + panic(err) + } + m1 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m2 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m3 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb}, + } + m4 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb}, + } + m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb} + + any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} + proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")}) + proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar")) + any1b, err := proto.Marshal(any1) + if err != nil { + panic(err) + } + any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}} + proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")}) + any2b, err := proto.Marshal(any2) + if err != nil { + panic(err) + } + m6 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + ManyThings: []*anypb.Any{ + &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b}, + &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + }, + } + + const ( + m1Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m2Golden = ` +name: "David" +result_count: 47 +anything: < + ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m3Golden = ` +name: "David" +result_count: 47 +anything: < + ["type.googleapis.com/\"/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m4Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m5Golden = ` +[type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" +> +` + m6Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 42 + bikeshed: GREEN + rep_bytes: "roboto" + [testdata.Ext.more]: < + data: "baz" + > + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +` + ) + return []golden{ + {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "}, + {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "}, + {m3, strings.TrimSpace(m3Golden) + "\n", 
strings.TrimSpace(compact(m3Golden)) + " "}, + {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "}, + {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "}, + {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "}, + } +} + +func TestMarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want { + t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) + } + if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { + t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) + } + } +} + +func TestUnmarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + want := tt.m + got := proto.Clone(tt.m) + got.Reset() + if err := proto.UnmarshalText(tt.t, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) + } + got.Reset() + if err := proto.UnmarshalText(tt.c, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) + } + } +} + +func TestMarshalUnknownAny(t *testing.T) { + m := &pb.Message{ + Anything: &anypb.Any{ + TypeUrl: "foo", + Value: []byte("bar"), + }, + } + want := `anything: < + type_url: "foo" + value: "bar" +> +` + got := expandedMarshaler.Text(m) + if got != want { + t.Errorf("got\n`%s`\nwant\n`%s`", got, want) + } +} + +func TestAmbiguousAny(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + type_url: "ttt/proto3_proto.Nested" + value: "\n\x05Monty" + `, pb) + t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err) + if err != nil { + t.Errorf("failed to parse ambiguous Any message: %v", err) + } +} + +func TestUnmarshalOverwriteAny(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 7: Any message unpacked multiple times, or "type_url" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} + +func TestUnmarshalAnyMixAndMatch(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + value: "\n\x05Monty" + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 5: Any message unpacked multiple times, or "value" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go index 84bbaf43359..e392575b353 100644 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -30,7 +30,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Protocol buffer deep copy and merge. -// TODO: MessageSet and RawMessage. +// TODO: RawMessage. 
package proto @@ -84,9 +84,15 @@ func mergeStruct(out, in reflect.Value) { mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } - if emIn, ok := in.Addr().Interface().(extendableProto); ok { - emOut := out.Addr().Interface().(extendableProto) - mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } } uf := in.FieldByName("XXX_unrecognized") diff --git a/vendor/github.com/golang/protobuf/proto/clone_test.go b/vendor/github.com/golang/protobuf/proto/clone_test.go new file mode 100644 index 00000000000..f607ff49eba --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone_test.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var cloneTestMessage = &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, +} + +func init() { + ext := &pb.Ext{ + Data: proto.String("extension"), + } + if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { + panic("SetExtension: " + err.Error()) + } +} + +func TestClone(t *testing.T) { + m := proto.Clone(cloneTestMessage).(*pb.MyMessage) + if !proto.Equal(m, cloneTestMessage) { + t.Errorf("Clone(%v) = %v", cloneTestMessage, m) + } + + // Verify it was a deep copy. + *m.Inner.Port++ + if proto.Equal(m, cloneTestMessage) { + t.Error("Mutating clone changed the original") + } + // Byte fields and repeated fields should be copied. + if &m.Pet[0] == &cloneTestMessage.Pet[0] { + t.Error("Pet: repeated field not copied") + } + if &m.Others[0] == &cloneTestMessage.Others[0] { + t.Error("Others: repeated field not copied") + } + if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { + t.Error("Others[0].Value: bytes field not copied") + } + if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { + t.Error("RepBytes: repeated field not copied") + } + if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { + t.Error("RepBytes[0]: bytes field not copied") + } +} + +func TestCloneNil(t *testing.T) { + var m *pb.MyMessage + if c := proto.Clone(m); !proto.Equal(m, c) { + t.Errorf("Clone(%v) = %v", m, c) + } +} + +var mergeTests = []struct { + src, dst, want proto.Message +}{ + { + src: &pb.MyMessage{ + Count: proto.Int32(42), + }, + dst: &pb.MyMessage{ + Name: proto.String("Dave"), + }, + want: &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + }, + }, + { + src: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + }, + Pet: []string{"horsey"}, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + }, + dst: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + { + // Explicitly test a src=nil field + Inner: nil, + }, + }, + }, + want: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty", "horsey"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + {}, + { + Value: []byte("some bytes"), + }, + }, + }, + }, + { + src: &pb.MyMessage{ + RepBytes: [][]byte{[]byte("wow")}, + }, + dst: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham")}, + }, + want: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, + }, + }, + // Check that a scalar bytes field replaces rather than appends. 
+ { + src: &pb.OtherMessage{Value: []byte("foo")}, + dst: &pb.OtherMessage{Value: []byte("bar")}, + want: &pb.OtherMessage{Value: []byte("foo")}, + }, + { + src: &pb.MessageWithMap{ + NameMapping: map[int32]string{6: "Nigel"}, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(2.0), + }, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + dst: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Bruce", // should be overwritten + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(3.0), + Exact: proto.Bool(true), + }, // the entire message should be overwritten + }, + }, + want: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Nigel", + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(2.0), + }, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + }, + // proto3 shouldn't merge zero values, + // in the same way that proto2 shouldn't merge nils. + { + src: &proto3pb.Message{ + Name: "Aaron", + Data: []byte(""), // zero value, but not nil + }, + dst: &proto3pb.Message{ + HeightInCm: 176, + Data: []byte("texas!"), + }, + want: &proto3pb.Message{ + Name: "Aaron", + HeightInCm: 176, + Data: []byte("texas!"), + }, + }, + // Oneof fields should merge by assignment. + { + src: &pb.Communique{ + Union: &pb.Communique_Number{41}, + }, + dst: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Number{41}, + }, + }, + // Oneof nil is the same as not set. + { + src: &pb.Communique{}, + dst: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + }, + { + src: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Cute: true}, // replace + "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert + }, + }, + dst: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced + "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep + }, + }, + want: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Cute: true}, + "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, + "kay_c": &proto3pb.Nested{Bunny: "bunny"}, + }, + }, + }, +} + +func TestMerge(t *testing.T) { + for _, m := range mergeTests { + got := proto.Clone(m.dst) + proto.Merge(got, m.src) + if !proto.Equal(got, m.want) { + t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index 5810782fd84..04dcb881305 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -378,6 +378,11 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group wire := int(u & 0x7) if wire == WireEndGroup { if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) 
+ return &RequiredNotSetError{"{Unknown}"} + } return nil // input is satisfied } return fmt.Errorf("proto: %s: wiretype end group for non-group", st) @@ -390,11 +395,12 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group if !ok { // Maybe it's an extension? if prop.extendable { - if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { if err = o.skip(st, tag, wire); err == nil { - ext := e.ExtensionMap()[int32(tag)] // may be missing + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing ext.enc = append(ext.enc, o.buf[oi:o.index]...) - e.ExtensionMap()[int32(tag)] = ext + extmap[int32(tag)] = ext } continue } @@ -768,10 +774,11 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { } } keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() || !valelem.IsValid() { - // We did not decode the key or the value in the map entry. - // Either way, it's an invalid map entry. - return fmt.Errorf("proto: bad map data: missing key/val") + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) } v.SetMapIndex(keyelem, valelem) diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 7321e1aae12..8c1b8fd1f68 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -64,8 +64,16 @@ var ( // a struct with a repeated field containing a nil element. errRepeatedHasNil = errors.New("proto: repeated field has nil element") + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + // ErrNil is the error returned if Marshal is called with nil. ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") ) // The fundamental encoders that put bytes on the wire. @@ -74,6 +82,10 @@ var ( const maxVarintBytes = 10 // maximum length of a varint +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -105,6 +117,11 @@ func (p *Buffer) EncodeVarint(x uint64) error { return nil } +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + func sizeVarint(x uint64) (n int) { for { n++ @@ -268,6 +285,9 @@ func (p *Buffer) Marshal(pb Message) error { stats.Encode++ } + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } return err } @@ -1053,10 +1073,25 @@ func size_slice_struct_group(p *Properties, base structPointer) (n int) { // Encode an extension map. 
func (o *Buffer) enc_map(p *Properties, base structPointer) error { - v := *structPointer_ExtMap(base, p.field) - if err := encodeExtensionMap(v); err != nil { + exts := structPointer_ExtMap(base, p.field) + if err := encodeExtensionsMap(*exts); err != nil { return err } + + return o.enc_map_body(*exts) +} + +func (o *Buffer) enc_exts(p *Properties, base structPointer) error { + exts := structPointer_Extensions(base, p.field) + if err := encodeExtensions(exts); err != nil { + return err + } + v, _ := exts.extensionsRead() + + return o.enc_map_body(v) +} + +func (o *Buffer) enc_map_body(v map[int32]Extension) error { // Fast-path for common cases: zero or one extensions. if len(v) <= 1 { for _, e := range v { @@ -1079,8 +1114,13 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error { } func size_map(p *Properties, base structPointer) int { - v := *structPointer_ExtMap(base, p.field) - return sizeExtensionMap(v) + v := structPointer_ExtMap(base, p.field) + return extensionsMapSize(*v) +} + +func size_exts(p *Properties, base structPointer) int { + v := structPointer_Extensions(base, p.field) + return extensionsSize(v) } // Encode a map field. @@ -1109,7 +1149,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { return err } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { return err } return nil @@ -1119,11 +1159,6 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { for _, key := range v.MapKeys() { val := v.MapIndex(key) - // The only illegal map entry values are nil message pointers. - if val.Kind() == reflect.Ptr && val.IsNil() { - return errors.New("proto: map has nil element") - } - keycopy.Set(key) valcopy.Set(val) @@ -1211,13 +1246,18 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { return err } } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } } } // Do oneof fields. if prop.oneofMarshaler != nil { m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err != nil { + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { return err } } @@ -1225,6 +1265,9 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { // Add unrecognized fields at the end. if prop.unrecField.IsValid() { v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } if len(v) > 0 { o.buf = append(o.buf, v...) } @@ -1248,24 +1291,9 @@ func size_struct(prop *StructProperties, base structPointer) (n int) { } // Factor in any oneof fields. - // TODO: This could be faster and use less reflection. 
- if prop.oneofMarshaler != nil { - sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem() - for i := 0; i < prop.stype.NumField(); i++ { - fv := sv.Field(i) - if fv.Kind() != reflect.Interface || fv.IsNil() { - continue - } - if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" { - continue - } - spv := fv.Elem() // interface -> *T - sv := spv.Elem() // *T -> T - sf := sv.Type().Field(0) // StructField inside T - var prop Properties - prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf) - n += prop.size(&prop, toStructPointer(spv)) - } + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) } return diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index 5475c3d9596..8b16f951c71 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -30,7 +30,6 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Protocol buffer comparison. -// TODO: MessageSet. package proto @@ -51,7 +50,9 @@ Equality is defined in this way: are equal, and extensions sets are equal. - Two set scalar fields are equal iff their values are equal. If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). - Two repeated fields are equal iff their lengths are the same, and their corresponding elements are equal (a "bytes" field, although represented by []byte, is not a repeated field) @@ -89,6 +90,7 @@ func Equal(a, b Message) bool { // v1 and v2 are known to have the same type. func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) for i := 0; i < v1.NumField(); i++ { f := v1.Type().Field(i) if strings.HasPrefix(f.Name, "XXX_") { @@ -114,14 +116,21 @@ func equalStruct(v1, v2 reflect.Value) bool { } f1, f2 = f1.Elem(), f2.Elem() } - if !equalAny(f1, f2) { + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { return false } } if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { em2 := v2.FieldByName("XXX_extensions") - if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { return false } } @@ -141,7 +150,8 @@ func equalStruct(v1, v2 reflect.Value) bool { } // v1 and v2 are known to have the same type. -func equalAny(v1, v2 reflect.Value) bool { +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { if v1.Type() == protoMessageType { m1, _ := v1.Interface().(Message) m2, _ := v2.Interface().(Message) @@ -164,7 +174,7 @@ func equalAny(v1, v2 reflect.Value) bool { if e1.Type() != e2.Type() { return false } - return equalAny(e1, e2) + return equalAny(e1, e2, nil) case reflect.Map: if v1.Len() != v2.Len() { return false @@ -175,16 +185,29 @@ func equalAny(v1, v2 reflect.Value) bool { // This key was not found in the second map. 
return false } - if !equalAny(v1.MapIndex(key), val2) { + if !equalAny(v1.MapIndex(key), val2, nil) { return false } } return true case reflect.Ptr: - return equalAny(v1.Elem(), v2.Elem()) + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) case reflect.Slice: if v1.Type().Elem().Kind() == reflect.Uint8 { // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. + if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } if v1.IsNil() != v2.IsNil() { return false } @@ -195,7 +218,7 @@ func equalAny(v1, v2 reflect.Value) bool { return false } for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i)) { + if !equalAny(v1.Index(i), v2.Index(i), prop) { return false } } @@ -214,8 +237,14 @@ func equalAny(v1, v2 reflect.Value) bool { } // base is the struct type that the extensions are based on. -// em1 and em2 are extension maps. -func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { if len(em1) != len(em2) { return false } @@ -230,7 +259,7 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { if m1 != nil && m2 != nil { // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { return false } continue @@ -258,7 +287,7 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) return false } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { return false } } diff --git a/vendor/github.com/golang/protobuf/proto/equal_test.go b/vendor/github.com/golang/protobuf/proto/equal_test.go new file mode 100644 index 00000000000..7b45eaa6dc7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/equal_test.go @@ -0,0 +1,212 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +// Four identical base messages. +// The init function adds extensions to some of them. +var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} + +// Two messages with non-message extensions. +var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} +var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} + +func init() { + ext1 := &pb.Ext{Data: String("Kirk")} + ext2 := &pb.Ext{Data: String("Picard")} + + // messageWithExtension1a has ext1, but never marshals it. + if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1a failed: " + err.Error()) + } + + // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. + if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1b failed: " + err.Error()) + } + buf, err := Marshal(messageWithExtension1b) + if err != nil { + panic("Marshal of 1b failed: " + err.Error()) + } + messageWithExtension1b.Reset() + if err := Unmarshal(buf, messageWithExtension1b); err != nil { + panic("Unmarshal of 1b failed: " + err.Error()) + } + + // messageWithExtension2 has ext2. 
+ if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { + panic("SetExtension on 2 failed: " + err.Error()) + } + + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { + panic("SetExtension on Int32-1 failed: " + err.Error()) + } + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { + panic("SetExtension on Int32-2 failed: " + err.Error()) + } +} + +var EqualTests = []struct { + desc string + a, b Message + exp bool +}{ + {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, + {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, + {"nil vs nil", nil, nil, true}, + {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, + {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, + {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, + + {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, + {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, + {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, + {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, + + {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, + {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, + {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, + {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, + {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, + {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, + {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true}, + + { + "nested, different", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, + false, + }, + { + "nested, equal", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + true, + }, + + {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, + {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, + {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, + { + "repeated bytes", + &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, + &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, + true, + }, + // In proto3, []byte{} and []byte(nil) are equal. + {"proto3 bytes, empty vs nil", &proto3pb.Message{Data: []byte{}}, &proto3pb.Message{Data: nil}, true}, + + {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, + {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true}, + {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, + + {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, + {"int32 extension vs. 
a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, + + { + "message with group", + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + true, + }, + + { + "map same", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + true, + }, + { + "map different entry", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, + false, + }, + { + "map different key only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, + false, + }, + { + "map different value only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, + false, + }, + { + "oneof same", + &pb.Communique{Union: &pb.Communique_Number{41}}, + &pb.Communique{Union: &pb.Communique_Number{41}}, + true, + }, + { + "oneof one nil", + &pb.Communique{Union: &pb.Communique_Number{41}}, + &pb.Communique{}, + false, + }, + { + "oneof different", + &pb.Communique{Union: &pb.Communique_Number{41}}, + &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}}, + false, + }, +} + +func TestEqual(t *testing.T) { + for _, tc := range EqualTests { + if res := Equal(tc.a, tc.b); res != tc.exp { + t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index e591ccef79f..6b9b3637466 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -52,14 +52,99 @@ type ExtensionRange struct { Start, End int32 // both inclusive } -// extendableProto is an interface implemented by any protocol buffer that may be extended. +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { Message ExtensionRangeArray() []ExtensionRange ExtensionMap() map[int32]Extension } +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. 
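As a rough illustration of the seam introduced here (assuming package-proto scope; hasAnyExtensions is a hypothetical helper, not part of this patch), the extendable helper defined just below normalizes both generations of generated code before the read-path accessors are used:

// hasAnyExtensions is a hypothetical sketch: extendable() adapts old and new
// messages, extensionsRead() may report a nil map, and the returned locker
// guards concurrent readers of the extension map.
func hasAnyExtensions(pb Message) bool {
	epb, ok := extendable(pb)
	if !ok {
		return false // not an extendable message
	}
	m, mu := epb.extensionsRead()
	if m == nil {
		return false // no extensions have ever been set
	}
	mu.Lock()
	defer mu.Unlock()
	return len(m) > 0
}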
+func extendable(p interface{}) (extendableProto, bool) { + if ep, ok := p.(extendableProto); ok { + return ep, ok + } + if ep, ok := p.(extendableProtoV1); ok { + return extensionAdapter{ep}, ok + } + return nil, false +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() +var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. @@ -92,8 +177,13 @@ type Extension struct { } // SetRawExtension is for testing only. -func SetRawExtension(base extendableProto, id int32, b []byte) { - base.ExtensionMap()[id] = Extension{enc: b} +func SetRawExtension(base Message, id int32, b []byte) { + epb, ok := extendable(base) + if !ok { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} } // isExtensionField returns true iff the given field number is in an extension range. @@ -108,8 +198,12 @@ func isExtensionField(pb extendableProto, field int32) bool { // checkExtensionTypes checks that the given extension is valid for pb. func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb // Check the extended type. - if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) } // Check the range. @@ -155,8 +249,19 @@ func extensionProperties(ed *ExtensionDesc) *Properties { return prop } -// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. -func encodeExtensionMap(m map[int32]Extension) error { +// encode encodes any unmarshaled (unencoded) extensions in e. 
+func encodeExtensions(e *XXX_InternalExtensions) error { + m, mu := e.extensionsRead() + if m == nil { + return nil // fast path + } + mu.Lock() + defer mu.Unlock() + return encodeExtensionsMap(m) +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensionsMap(m map[int32]Extension) error { for k, e := range m { if e.value == nil || e.desc == nil { // Extension is only in its encoded form. @@ -184,7 +289,17 @@ func encodeExtensionMap(m map[int32]Extension) error { return nil } -func sizeExtensionMap(m map[int32]Extension) (n int) { +func extensionsSize(e *XXX_InternalExtensions) (n int) { + m, mu := e.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + defer mu.Unlock() + return extensionsMapSize(m) +} + +func extensionsMapSize(m map[int32]Extension) (n int) { for _, e := range m { if e.value == nil || e.desc == nil { // Extension is only in its encoded form. @@ -209,26 +324,51 @@ func sizeExtensionMap(m map[int32]Extension) (n int) { } // HasExtension returns whether the given extension is present in pb. -func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { +func HasExtension(pb Message, extension *ExtensionDesc) bool { // TODO: Check types, field numbers, etc.? - _, ok := pb.ExtensionMap()[extension.Field] + epb, ok := extendable(pb) + if !ok { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok = extmap[extension.Field] + mu.Unlock() return ok } // ClearExtension removes the given extension from pb. -func ClearExtension(pb extendableProto, extension *ExtensionDesc) { +func ClearExtension(pb Message, extension *ExtensionDesc) { + epb, ok := extendable(pb) + if !ok { + return + } // TODO: Check types, field numbers, etc.? - delete(pb.ExtensionMap(), extension.Field) + extmap := epb.extensionsWrite() + delete(extmap, extension.Field) } // GetExtension parses and returns the given extension of pb. // If the extension is not present and has no default value it returns ErrMissingExtension. -func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { - if err := checkExtensionTypes(pb, extension); err != nil { +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + + if err := checkExtensionTypes(epb, extension); err != nil { return nil, err } - emap := pb.ExtensionMap() + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() e, ok := emap[extension.Field] if !ok { // defaultExtensionValue returns the default value or @@ -301,7 +441,6 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { o := NewBuffer(b) t := reflect.TypeOf(extension.ExtensionType) - rep := extension.repeated() props := extensionProperties(extension) @@ -323,7 +462,7 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { return nil, err } - if !rep || o.index >= len(o.buf) { + if o.index >= len(o.buf) { break } } @@ -333,10 +472,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. 
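A hedged usage sketch of the Message-typed extension API above (mirroring the testdata types exercised by the test file later in this patch; not part of the vendored code itself):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	msg := &pb.MyMessage{Count: proto.Int32(4)}
	// Set, query, and clear an extension; any generated message type can be passed,
	// since extendability is now detected at run time instead of via the parameter type.
	if err := proto.SetExtension(msg, pb.E_Ext_More, &pb.Ext{Data: proto.String("hi")}); err != nil {
		panic(err)
	}
	fmt.Println(proto.HasExtension(msg, pb.E_Ext_More)) // true
	proto.ClearExtension(msg, pb.E_Ext_More)
	fmt.Println(proto.HasExtension(msg, pb.E_Ext_More)) // false
}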
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := pb.(extendableProto) + epb, ok := extendable(pb) if !ok { - err = errors.New("proto: not an extendable proto") - return + return nil, errors.New("proto: not an extendable proto") } extensions = make([]interface{}, len(es)) for i, e := range es { @@ -351,9 +489,44 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e return } +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + // SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { - if err := checkExtensionTypes(pb, extension); err != nil { +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { return err } typ := reflect.TypeOf(extension.ExtensionType) @@ -369,10 +542,23 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) } - pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} return nil } +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + // A global registry of extensions. // The generated code will register the generated descriptors by calling RegisterExtension. diff --git a/vendor/github.com/golang/protobuf/proto/extensions_test.go b/vendor/github.com/golang/protobuf/proto/extensions_test.go new file mode 100644 index 00000000000..403d7c65eab --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions_test.go @@ -0,0 +1,508 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +func TestGetExtensionsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", err) + } + exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ + pb.E_Ext_More, + pb.E_Ext_Text, + }) + if err != nil { + t.Fatalf("GetExtensions() failed: %s", err) + } + if exts[0] != ext1 { + t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) + } + if exts[1] != nil { + t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) + } +} + +func TestExtensionDescsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{Count: proto.Int32(0)} + extdesc1 := pb.E_Ext_More + if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil { + t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err) + } + + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, extdesc1, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", err) + } + extdesc2 := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 123456789, + Name: "a.b", + Tag: "varint,123456789,opt", + } + ext2 := proto.Bool(false) + if err := proto.SetExtension(msg, extdesc2, ext2); err != nil { + t.Fatalf("Could not set ext2: %s", err) + } + + b, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Could not marshal msg: %v", err) + } + if err := proto.Unmarshal(b, msg); err != nil { + t.Fatalf("Could not unmarshal into msg: %v", err) + } + + descs, err := proto.ExtensionDescs(msg) + if err != nil { + t.Fatalf("proto.ExtensionDescs: got error %v", err) + } + sortExtDescs(descs) + wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}} + if !reflect.DeepEqual(descs, wantDescs) { + t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs) + } +} + +type ExtensionDescSlice []*proto.ExtensionDesc + +func (s ExtensionDescSlice) Len() int { return len(s) } +func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field } +func (s ExtensionDescSlice) 
Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func sortExtDescs(s []*proto.ExtensionDesc) { + sort.Sort(ExtensionDescSlice(s)) +} + +func TestGetExtensionStability(t *testing.T) { + check := func(m *pb.MyMessage) bool { + ext1, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + ext2, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + return ext1 == ext2 + } + msg := &pb.MyMessage{Count: proto.Int32(4)} + ext0 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { + t.Fatalf("Could not set ext1: %s", ext0) + } + if !check(msg) { + t.Errorf("GetExtension() not stable before marshaling") + } + bb, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Marshal() failed: %s", err) + } + msg1 := &pb.MyMessage{} + err = proto.Unmarshal(bb, msg1) + if err != nil { + t.Fatalf("Unmarshal() failed: %s", err) + } + if !check(msg1) { + t.Errorf("GetExtension() not stable after unmarshaling") + } +} + +func TestGetExtensionDefaults(t *testing.T) { + var setFloat64 float64 = 1 + var setFloat32 float32 = 2 + var setInt32 int32 = 3 + var setInt64 int64 = 4 + var setUint32 uint32 = 5 + var setUint64 uint64 = 6 + var setBool = true + var setBool2 = false + var setString = "Goodnight string" + var setBytes = []byte("Goodnight bytes") + var setEnum = pb.DefaultsMessage_TWO + + type testcase struct { + ext *proto.ExtensionDesc // Extension we are testing. + want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). + def interface{} // Expected value of extension after ClearExtension(). + } + tests := []testcase{ + {pb.E_NoDefaultDouble, setFloat64, nil}, + {pb.E_NoDefaultFloat, setFloat32, nil}, + {pb.E_NoDefaultInt32, setInt32, nil}, + {pb.E_NoDefaultInt64, setInt64, nil}, + {pb.E_NoDefaultUint32, setUint32, nil}, + {pb.E_NoDefaultUint64, setUint64, nil}, + {pb.E_NoDefaultSint32, setInt32, nil}, + {pb.E_NoDefaultSint64, setInt64, nil}, + {pb.E_NoDefaultFixed32, setUint32, nil}, + {pb.E_NoDefaultFixed64, setUint64, nil}, + {pb.E_NoDefaultSfixed32, setInt32, nil}, + {pb.E_NoDefaultSfixed64, setInt64, nil}, + {pb.E_NoDefaultBool, setBool, nil}, + {pb.E_NoDefaultBool, setBool2, nil}, + {pb.E_NoDefaultString, setString, nil}, + {pb.E_NoDefaultBytes, setBytes, nil}, + {pb.E_NoDefaultEnum, setEnum, nil}, + {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, + {pb.E_DefaultFloat, setFloat32, float32(3.14)}, + {pb.E_DefaultInt32, setInt32, int32(42)}, + {pb.E_DefaultInt64, setInt64, int64(43)}, + {pb.E_DefaultUint32, setUint32, uint32(44)}, + {pb.E_DefaultUint64, setUint64, uint64(45)}, + {pb.E_DefaultSint32, setInt32, int32(46)}, + {pb.E_DefaultSint64, setInt64, int64(47)}, + {pb.E_DefaultFixed32, setUint32, uint32(48)}, + {pb.E_DefaultFixed64, setUint64, uint64(49)}, + {pb.E_DefaultSfixed32, setInt32, int32(50)}, + {pb.E_DefaultSfixed64, setInt64, int64(51)}, + {pb.E_DefaultBool, setBool, true}, + {pb.E_DefaultBool, setBool2, true}, + {pb.E_DefaultString, setString, "Hello, string"}, + {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, + {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, + } + + checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { + val, err := proto.GetExtension(msg, test.ext) + if err != nil { + if valWant != nil { + return fmt.Errorf("GetExtension(): %s", err) + } + if want := proto.ErrMissingExtension; err != want { + return fmt.Errorf("Unexpected error: got %v, want %v", 
err, want) + } + return nil + } + + // All proto2 extension values are either a pointer to a value or a slice of values. + ty := reflect.TypeOf(val) + tyWant := reflect.TypeOf(test.ext.ExtensionType) + if got, want := ty, tyWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) + } + tye := ty.Elem() + tyeWant := tyWant.Elem() + if got, want := tye, tyeWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) + } + + // Check the name of the type of the value. + // If it is an enum it will be type int32 with the name of the enum. + if got, want := tye.Name(), tye.Name(); got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) + } + + // Check that value is what we expect. + // If we have a pointer in val, get the value it points to. + valExp := val + if ty.Kind() == reflect.Ptr { + valExp = reflect.ValueOf(val).Elem().Interface() + } + if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { + return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) + } + + return nil + } + + setTo := func(test testcase) interface{} { + setTo := reflect.ValueOf(test.want) + if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { + setTo = reflect.New(typ).Elem() + setTo.Set(reflect.New(setTo.Type().Elem())) + setTo.Elem().Set(reflect.ValueOf(test.want)) + } + return setTo.Interface() + } + + for _, test := range tests { + msg := &pb.DefaultsMessage{} + name := test.ext.Name + + // Check the initial value. + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + + // Set the per-type value and check value. + name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) + if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { + t.Errorf("%s: SetExtension(): %v", name, err) + continue + } + if err := checkVal(test, msg, test.want); err != nil { + t.Errorf("%s: %v", name, err) + continue + } + + // Set and check the value. 
+ name += " (cleared)" + proto.ClearExtension(msg, test.ext) + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + } +} + +func TestExtensionsRoundTrip(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{ + Data: proto.String("hi"), + } + ext2 := &pb.Ext{ + Data: proto.String("there"), + } + exists := proto.HasExtension(msg, pb.E_Ext_More) + if exists { + t.Error("Extension More present unexpectedly") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Error(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { + t.Error(err) + } + e, err := proto.GetExtension(msg, pb.E_Ext_More) + if err != nil { + t.Error(err) + } + x, ok := e.(*pb.Ext) + if !ok { + t.Errorf("e has type %T, expected testdata.Ext", e) + } else if *x.Data != "there" { + t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) + } + proto.ClearExtension(msg, pb.E_Ext_More) + if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { + t.Errorf("got %v, expected ErrMissingExtension", e) + } + if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { + t.Error("expected bad extension error, got nil") + } + if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { + t.Error("expected extension err") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { + t.Error("expected some sort of type mismatch error, got nil") + } +} + +func TestNilExtension(t *testing.T) { + msg := &pb.MyMessage{ + Count: proto.Int32(1), + } + if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { + t.Fatal(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { + t.Error("expected SetExtension to fail due to a nil extension") + } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { + t.Errorf("expected error %v, got %v", want, err) + } + // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update + // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. +} + +func TestMarshalUnmarshalRepeatedExtension(t *testing.T) { + // Add a repeated extension to the result. + tests := []struct { + name string + ext []*pb.ComplexExtension + }{ + { + "two fields", + []*pb.ComplexExtension{ + {First: proto.Int32(7)}, + {Second: proto.Int32(11)}, + }, + }, + { + "repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {Third: []int32{2000}}, + }, + }, + { + "two fields and repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {First: proto.Int32(9)}, + {Second: proto.Int32(21)}, + {Third: []int32{2000}}, + }, + }, + } + for _, test := range tests { + // Marshal message with a repeated extension. + msg1 := new(pb.OtherMessage) + err := proto.SetExtension(msg1, pb.E_RComplex, test.ext) + if err != nil { + t.Fatalf("[%s] Error setting extension: %v", test.name, err) + } + b, err := proto.Marshal(msg1) + if err != nil { + t.Fatalf("[%s] Error marshaling message: %v", test.name, err) + } + + // Unmarshal and read the merged proto. 
+ msg2 := new(pb.OtherMessage) + err = proto.Unmarshal(b, msg2) + if err != nil { + t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) + } + e, err := proto.GetExtension(msg2, pb.E_RComplex) + if err != nil { + t.Fatalf("[%s] Error getting extension: %v", test.name, err) + } + ext := e.([]*pb.ComplexExtension) + if ext == nil { + t.Fatalf("[%s] Invalid extension", test.name) + } + if !reflect.DeepEqual(ext, test.ext) { + t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext) + } + } +} + +func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) { + // We may see multiple instances of the same extension in the wire + // format. For example, the proto compiler may encode custom options in + // this way. Here, we verify that we merge the extensions together. + tests := []struct { + name string + ext []*pb.ComplexExtension + }{ + { + "two fields", + []*pb.ComplexExtension{ + {First: proto.Int32(7)}, + {Second: proto.Int32(11)}, + }, + }, + { + "repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {Third: []int32{2000}}, + }, + }, + { + "two fields and repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {First: proto.Int32(9)}, + {Second: proto.Int32(21)}, + {Third: []int32{2000}}, + }, + }, + } + for _, test := range tests { + var buf bytes.Buffer + var want pb.ComplexExtension + + // Generate a serialized representation of a repeated extension + // by catenating bytes together. + for i, e := range test.ext { + // Merge to create the wanted proto. + proto.Merge(&want, e) + + // serialize the message + msg := new(pb.OtherMessage) + err := proto.SetExtension(msg, pb.E_Complex, e) + if err != nil { + t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err) + } + b, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err) + } + buf.Write(b) + } + + // Unmarshal and read the merged proto. 
+ msg2 := new(pb.OtherMessage) + err := proto.Unmarshal(buf.Bytes(), msg2) + if err != nil { + t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) + } + e, err := proto.GetExtension(msg2, pb.E_Complex) + if err != nil { + t.Fatalf("[%s] Error getting extension: %v", test.name, err) + } + ext := e.(*pb.ComplexExtension) + if ext == nil { + t.Fatalf("[%s] Invalid extension", test.name) + } + if !reflect.DeepEqual(*ext, want) { + t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want) + } + } +} + +func TestClearAllExtensions(t *testing.T) { + // unregistered extension + desc := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 101010100, + Name: "emptyextension", + Tag: "varint,0,opt", + } + m := &pb.MyMessage{} + if proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) + } + if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { + t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) + } + if !proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m)) + } + proto.ClearAllExtensions(m) + if proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index abbf7583ed0..ac4ddbc0759 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -70,6 +70,12 @@ for a protocol buffer variable v: with distinguished wrapper types for each possible field value. - Marshal and Unmarshal are functions to encode and decode the wire format. +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + The simplest way to describe this is to see an example. Given file test.proto, containing @@ -216,7 +222,7 @@ The resulting file, test.pb.go, is: To create and play with a Test object: -package main + package main import ( "log" @@ -229,6 +235,7 @@ package main test := &pb.Test{ Label: proto.String("hello"), Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, Optionalgroup: &pb.Test_OptionalGroup{ RequiredField: proto.String("good bye"), }, @@ -301,7 +308,7 @@ func GetStats() Stats { return stats } // temporary Buffer and are fine for most applications. type Buffer struct { buf []byte // encode/decode byte stream - index int // write point + index int // read point // pools of basic types to amortize allocation. bools []bool @@ -881,3 +888,11 @@ func isProto3Zero(v reflect.Value) bool { } return false } + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. 
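A minimal sketch of the proto2/proto3 difference documented in the lib.go comment above (buildExamples is a hypothetical function; the message types are the testdata and proto3_proto messages that appear elsewhere in this patch):

package sketch

import (
	"github.com/golang/protobuf/proto"
	proto3pb "github.com/golang/protobuf/proto/proto3_proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func buildExamples() (proto.Message, proto.Message) {
	m2 := &pb.MyMessage{Count: proto.Int32(7)}         // proto2: scalar fields are pointers, set via helpers
	m3 := &proto3pb.Message{Name: "x", HeightInCm: 25} // proto3: scalar fields are plain values
	return m2, m3
}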
+const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index 9d912bce19b..fd982decd66 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -44,11 +44,11 @@ import ( "sort" ) -// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. // A message type ID is required for storing a protocol buffer in a message set. -var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") +var errNoMessageTypeID = errors.New("proto does not have a message type ID") -// The first two types (_MessageSet_Item and MessageSet) +// The first two types (_MessageSet_Item and messageSet) // model what the protocol compiler produces for the following protocol message: // message MessageSet { // repeated group Item = 1 { @@ -58,27 +58,20 @@ var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") // } // That is the MessageSet wire format. We can't use a proto to generate these // because that would introduce a circular dependency between it and this package. -// -// When a proto1 proto has a field that looks like: -// optional message info = 3; -// the protocol compiler produces a field in the generated struct that looks like: -// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` -// The package is automatically inserted so there is no need for that proto file to -// import this package. type _MessageSet_Item struct { TypeId *int32 `protobuf:"varint,2,req,name=type_id"` Message []byte `protobuf:"bytes,3,req,name=message"` } -type MessageSet struct { +type messageSet struct { Item []*_MessageSet_Item `protobuf:"group,1,rep"` XXX_unrecognized []byte // TODO: caching? } -// Make sure MessageSet is a Message. -var _ Message = (*MessageSet)(nil) +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) // messageTypeIder is an interface satisfied by a protocol buffer type // that may be stored in a MessageSet. @@ -86,7 +79,7 @@ type messageTypeIder interface { MessageTypeId() int32 } -func (ms *MessageSet) find(pb Message) *_MessageSet_Item { +func (ms *messageSet) find(pb Message) *_MessageSet_Item { mti, ok := pb.(messageTypeIder) if !ok { return nil @@ -100,24 +93,24 @@ func (ms *MessageSet) find(pb Message) *_MessageSet_Item { return nil } -func (ms *MessageSet) Has(pb Message) bool { +func (ms *messageSet) Has(pb Message) bool { if ms.find(pb) != nil { return true } return false } -func (ms *MessageSet) Unmarshal(pb Message) error { +func (ms *messageSet) Unmarshal(pb Message) error { if item := ms.find(pb); item != nil { return Unmarshal(item.Message, pb) } if _, ok := pb.(messageTypeIder); !ok { - return ErrNoMessageTypeId + return errNoMessageTypeID } return nil // TODO: return error instead? 
} -func (ms *MessageSet) Marshal(pb Message) error { +func (ms *messageSet) Marshal(pb Message) error { msg, err := Marshal(pb) if err != nil { return err @@ -130,7 +123,7 @@ func (ms *MessageSet) Marshal(pb Message) error { mti, ok := pb.(messageTypeIder) if !ok { - return ErrNoMessageTypeId + return errNoMessageTypeID } mtid := mti.MessageTypeId() @@ -141,9 +134,9 @@ func (ms *MessageSet) Marshal(pb Message) error { return nil } -func (ms *MessageSet) Reset() { *ms = MessageSet{} } -func (ms *MessageSet) String() string { return CompactTextString(ms) } -func (*MessageSet) ProtoMessage() {} +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} // Support for the message_set_wire_format message option. @@ -156,9 +149,21 @@ func skipVarint(buf []byte) []byte { // MarshalMessageSet encodes the extension map represented by m in the message set wire format. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { - if err := encodeExtensionMap(m); err != nil { - return nil, err +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") } // Sort extension IDs to provide a deterministic encoding. @@ -169,7 +174,7 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { } sort.Ints(ids) - ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} for _, id := range ids { e := m[int32(id)] // Remove the wire type and field number varint, as well as the length varint. @@ -185,8 +190,18 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { - ms := new(MessageSet) +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) if err := Unmarshal(buf, ms); err != nil { return err } @@ -216,7 +231,16 @@ func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { // MarshalMessageSetJSON encodes the extension map represented by m in JSON format. // It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. 
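Since MarshalMessageSet and UnmarshalMessageSet now take interface{}, either extension representation can be passed; a hypothetical package-internal round trip (roundTripMessageSet is not part of the patch) might look like:

// roundTripMessageSet marshals from the new XXX_InternalExtensions form and
// unmarshals into the plain map form produced by the older generator.
func roundTripMessageSet(src *XXX_InternalExtensions) (map[int32]Extension, error) {
	b, err := MarshalMessageSet(src) // *XXX_InternalExtensions is accepted
	if err != nil {
		return nil, err
	}
	dst := make(map[int32]Extension)
	if err := UnmarshalMessageSet(b, dst); err != nil { // the legacy map form is accepted too
		return nil, err
	}
	return dst, nil
}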
-func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } var b bytes.Buffer b.WriteByte('{') @@ -259,7 +283,7 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { // UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. // It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { // Common-case fast path. if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { return nil diff --git a/vendor/github.com/golang/protobuf/proto/message_set_test.go b/vendor/github.com/golang/protobuf/proto/message_set_test.go new file mode 100644 index 00000000000..353a3ea7694 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set_test.go @@ -0,0 +1,66 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "testing" +) + +func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { + // Check that a repeated message set entry will be concatenated. 
+ in := &messageSet{ + Item: []*_MessageSet_Item{ + {TypeId: Int32(12345), Message: []byte("hoo")}, + {TypeId: Int32(12345), Message: []byte("hah")}, + }, + } + b, err := Marshal(in) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("Marshaled bytes: %q", b) + + var extensions XXX_InternalExtensions + if err := UnmarshalMessageSet(b, &extensions); err != nil { + t.Fatalf("UnmarshalMessageSet: %v", err) + } + ext, ok := extensions.p.extensionMap[12345] + if !ok { + t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap) + } + // Skip wire type/field number and length varints. + got := skipVarint(skipVarint(ext.enc)) + if want := []byte("hoohah"); !bytes.Equal(got, want) { + t.Errorf("Combined extension is %q, want %q", got, want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index 749919d250a..fb512e2e16d 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine +// +build appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -139,6 +139,11 @@ func structPointer_StringSlice(p structPointer, f field) *[]string { return structPointer_ifield(p, f).(*[]string) } +// Extensions returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return structPointer_ifield(p, f).(*XXX_InternalExtensions) +} + // ExtMap returns the address of an extension map field in the struct. func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return structPointer_ifield(p, f).(*map[int32]Extension) diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index e9be0fe92ee..6b5567d47cd 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine +// +build !appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -126,6 +126,10 @@ func structPointer_StringSlice(p structPointer, f field) *[]string { } // ExtMap returns the address of an extension map field in the struct. 
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) } diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index ec3125812ab..69ddda8d4ba 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -37,6 +37,7 @@ package proto import ( "fmt" + "log" "os" "reflect" "sort" @@ -90,6 +91,9 @@ type oneofMarshaler func(Message, *Buffer) error // A oneofUnmarshaler does the unmarshaling for a oneof field in a message. type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -141,6 +145,7 @@ type StructProperties struct { oneofMarshaler oneofMarshaler oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer stype reflect.Type // OneofTypes contains information about the oneof fields in this message. @@ -168,6 +173,7 @@ func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order type Properties struct { Name string // name of the field, for error messages OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc Wire string WireType int Tag int @@ -224,8 +230,9 @@ func (p *Properties) String() string { if p.Packed { s += ",packed" } - if p.OrigName != p.Name { - s += ",name=" + p.OrigName + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName } if p.proto3 { s += ",proto3" @@ -305,6 +312,8 @@ func (p *Properties) Parse(s string) { p.Packed = true case strings.HasPrefix(f, "name="): p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] case strings.HasPrefix(f, "enum="): p.Enum = f[5:] case f == "proto3": @@ -464,17 +473,13 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.dec = (*Buffer).dec_slice_int64 p.packedDec = (*Buffer).dec_slice_packed_int64 case reflect.Uint8: - p.enc = (*Buffer).enc_slice_byte p.dec = (*Buffer).dec_slice_byte - p.size = size_slice_byte - // This is a []byte, which is either a bytes field, - // or the value of a map field. In the latter case, - // we always encode an empty []byte, so we should not - // use the proto3 enc/size funcs. - // f == nil iff this is the key/value of a map field. 
- if p.proto3 && f != nil { + if p.proto3 { p.enc = (*Buffer).enc_proto3_slice_byte p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte } case reflect.Float32, reflect.Float64: switch t2.Bits() { @@ -673,7 +678,8 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -684,15 +690,22 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_extensions" { // special case + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case p.enc = (*Buffer).enc_map p.dec = nil // not needed p.size = size_map - } - if f.Name == "XXX_unrecognized" { // special case + } else if f.Name == "XXX_unrecognized" { // special case prop.unrecField = toField(&f) } - oneof := f.Tag.Get("protobuf_oneof") != "" // special case + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } prop.Prop[i] = p prop.order[i] = i if debug { @@ -702,7 +715,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") } } @@ -711,11 +724,11 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { sort.Sort(prop) type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{}) + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs() + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() prop.stype = t // Interpret oneof metadata. @@ -809,3 +822,43 @@ func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[ func EnumValueMap(enumType string) map[string]int32 { return enumValueMaps[enumType] } + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. 
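A hedged sketch of how the new type registry above is meant to be used (lookupByName is hypothetical; whether a given generated file registers itself depends on the protoc-gen-go version that produced it):

package sketch

import (
	"reflect"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func lookupByName() (string, reflect.Type) {
	// Generated code from a matching protoc-gen-go registers each message in init():
	//   proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage")
	// after which the name and the Go type can be mapped in both directions.
	name := proto.MessageName(&pb.MyMessage{}) // "" if the type was never registered
	typ := proto.MessageType(name)             // nil if the name was never registered
	return name, typ
}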
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go new file mode 100644 index 00000000000..be386f1d539 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go @@ -0,0 +1,199 @@ +// Code generated by protoc-gen-go. +// source: proto3_proto/proto3.proto +// DO NOT EDIT! + +/* +Package proto3_proto is a generated protocol buffer package. + +It is generated from these files: + proto3_proto/proto3.proto + +It has these top-level messages: + Message + Nested + MessageWithMap +*/ +package proto3_proto + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" +import testdata "github.com/golang/protobuf/proto/testdata" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.ProtoPackageIsVersion1 + +type Message_Humour int32 + +const ( + Message_UNKNOWN Message_Humour = 0 + Message_PUNS Message_Humour = 1 + Message_SLAPSTICK Message_Humour = 2 + Message_BILL_BAILEY Message_Humour = 3 +) + +var Message_Humour_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PUNS", + 2: "SLAPSTICK", + 3: "BILL_BAILEY", +} +var Message_Humour_value = map[string]int32{ + "UNKNOWN": 0, + "PUNS": 1, + "SLAPSTICK": 2, + "BILL_BAILEY": 3, +} + +func (x Message_Humour) String() string { + return proto.EnumName(Message_Humour_name, int32(x)) +} +func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +type Message struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + RFunny []Message_Humour `protobuf:"varint,16,rep,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"` + ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Message) GetNested() *Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *Message) GetTerrain() map[string]*Nested { + if m != nil { + return m.Terrain + } + return nil +} + +func (m *Message) GetProto2Field() *testdata.SubDefaults { + if m != nil { + return m.Proto2Field + } + return nil +} + +func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { + if m != nil { + return m.Proto2Value + } + return nil +} + +func (m *Message) GetAnything() *google_protobuf.Any { + if m != nil { + return m.Anything + } + return nil +} + +func (m *Message) GetManyThings() []*google_protobuf.Any { + if m != nil { + return m.ManyThings + } + return nil +} + +type Nested struct { + Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` + Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m 
*Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} +func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type MessageWithMap struct { + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func init() { + proto.RegisterType((*Message)(nil), "proto3_proto.Message") + proto.RegisterType((*Nested)(nil), "proto3_proto.Nested") + proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap") + proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) +} + +var fileDescriptor0 = []byte{ + // 621 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x5b, 0x6f, 0xd3, 0x4c, + 0x10, 0xfd, 0x72, 0xa9, 0xe3, 0x8e, 0x9d, 0xd6, 0xda, 0xaf, 0x48, 0xdb, 0x88, 0x87, 0x12, 0x24, + 0x54, 0x71, 0x71, 0x51, 0x10, 0x52, 0x85, 0x10, 0xa8, 0x2d, 0xad, 0x88, 0x9a, 0x86, 0x68, 0xd3, + 0x52, 0xf1, 0x64, 0xad, 0xd3, 0x4d, 0x62, 0x11, 0xaf, 0x23, 0x7b, 0x8d, 0xe4, 0xbf, 0xc3, 0xaf, + 0xe4, 0x91, 0xbd, 0x38, 0xad, 0x5b, 0x05, 0x78, 0xf2, 0xee, 0xcc, 0x39, 0x33, 0xb3, 0xe7, 0x8c, + 0x61, 0x77, 0x99, 0x26, 0x22, 0x79, 0x13, 0xe8, 0xcf, 0x81, 0xb9, 0xf8, 0xfa, 0x83, 0xdc, 0x6a, + 0xaa, 0xb3, 0x3b, 0x4b, 0x92, 0xd9, 0x82, 0x19, 0x48, 0x98, 0x4f, 0x0f, 0x28, 0x2f, 0x0c, 0xb0, + 0xf3, 0xbf, 0x60, 0x99, 0xb8, 0xa1, 0x82, 0x1e, 0xa8, 0x83, 0x09, 0x76, 0x7f, 0x59, 0xd0, 0xba, + 0x60, 0x59, 0x46, 0x67, 0x0c, 0x21, 0x68, 0x72, 0x1a, 0x33, 0x5c, 0xdb, 0xab, 0xed, 0x6f, 0x12, + 0x7d, 0x46, 0x87, 0x60, 0xcf, 0xa3, 0x05, 0x4d, 0x23, 0x51, 0xe0, 0xba, 0x8c, 0x6f, 0xf5, 0x1e, + 0xfb, 0xd5, 0x86, 0x7e, 0x49, 0xf6, 0x3f, 0xe7, 0x71, 0x92, 0xa7, 0xe4, 0x16, 0x8d, 0xf6, 0xc0, + 0x9d, 0xb3, 0x68, 0x36, 0x17, 0x41, 0xc4, 0x83, 0x49, 0x8c, 0x1b, 0x92, 0xdd, 0x26, 0x60, 0x62, + 0x7d, 0x7e, 0x12, 0xab, 0x7e, 0x6a, 0x1c, 0xdc, 0x94, 0x19, 0x97, 0xe8, 0x33, 0x7a, 0x02, 0x6e, + 0xca, 0xb2, 0x7c, 0x21, 0x82, 0x49, 0x92, 0x73, 0x81, 0x5b, 0x32, 0xd7, 0x20, 0x8e, 0x89, 0x9d, + 0xa8, 0x10, 0x7a, 0x0a, 0x6d, 0x91, 0xe6, 0x2c, 0xc8, 0x26, 0x89, 0xc8, 0x62, 0xca, 0xb1, 0x2d, + 0x31, 0x36, 0x71, 0x55, 0x70, 0x5c, 0xc6, 0xd0, 0x0e, 0x6c, 0xc8, 0x7c, 0xca, 0xf0, 0xa6, 0x4c, + 0xd6, 0x89, 0xb9, 0x20, 0x0f, 0x1a, 0xdf, 0x59, 0x81, 0x37, 0xf6, 0x1a, 0xfb, 0x4d, 0xa2, 0x8e, + 0xe8, 0x25, 0x58, 0x5c, 0xaa, 0xc1, 0x6e, 0xb0, 0x25, 0x81, 0x4e, 0x6f, 0xe7, 0xfe, 0xeb, 0x86, + 0x3a, 0x47, 0x4a, 0x0c, 0x7a, 0x0b, 0xad, 0x34, 0x98, 0xe6, 0x9c, 0x17, 0xd8, 0x93, 0x35, 0xfe, + 0x25, 0x86, 0x95, 0x9e, 0x29, 0x2c, 0x7a, 0x0f, 0x2d, 0xc1, 0xd2, 0x94, 0x46, 0x1c, 0x83, 0xa4, + 0x39, 0xbd, 0xee, 0x7a, 0xda, 0xa5, 0x01, 0x9d, 0x72, 0x91, 0x16, 0x64, 0x45, 0x91, 0x16, 0x18, + 0x8b, 0x7b, 0xc1, 0x34, 0x62, 0x8b, 0x1b, 0xec, 0xe8, 0x41, 0x1f, 0xf9, 0x2b, 0x3b, 0xfd, 0x71, + 0x1e, 0x7e, 0x62, 0x53, 0x2a, 0x05, 0xca, 0x88, 0x63, 0xa0, 0x67, 0x0a, 0x89, 0xfa, 0xb7, 0xcc, + 0x1f, 0x74, 0x91, 0x33, 0xdc, 0xd6, 0xcd, 0x9f, 0xad, 0x6f, 0x3e, 0xd2, 0xc8, 0xaf, 0x0a, 
0x68, + 0x06, 0x28, 0x4b, 0xe9, 0x08, 0x7a, 0x0d, 0xb6, 0xdc, 0x24, 0x31, 0x8f, 0xf8, 0x0c, 0x6f, 0x95, + 0x4a, 0x99, 0x55, 0xf3, 0x57, 0xab, 0xe6, 0x1f, 0xf1, 0x82, 0xdc, 0xa2, 0xa4, 0x56, 0x8e, 0x34, + 0xa2, 0x08, 0xf4, 0x2d, 0xc3, 0xdb, 0xba, 0xf7, 0x7a, 0x12, 0x28, 0xe0, 0xa5, 0xc6, 0x75, 0x46, + 0xe0, 0x56, 0x65, 0x58, 0x59, 0x66, 0x76, 0x52, 0x5b, 0xf6, 0x1c, 0x36, 0xcc, 0x73, 0xea, 0x7f, + 0x71, 0xcc, 0x40, 0xde, 0xd5, 0x0f, 0x6b, 0x9d, 0x2b, 0xf0, 0x1e, 0xbe, 0x6d, 0x4d, 0xd5, 0x17, + 0xf7, 0xab, 0xfe, 0x41, 0xde, 0xbb, 0xb2, 0xdd, 0x8f, 0x60, 0x19, 0x9b, 0x91, 0x03, 0xad, 0xab, + 0xe1, 0xf9, 0xf0, 0xcb, 0xf5, 0xd0, 0xfb, 0x0f, 0xd9, 0xd0, 0x1c, 0x5d, 0x0d, 0xc7, 0x5e, 0x0d, + 0xb5, 0x61, 0x73, 0x3c, 0x38, 0x1a, 0x8d, 0x2f, 0xfb, 0x27, 0xe7, 0x5e, 0x1d, 0x6d, 0x83, 0x73, + 0xdc, 0x1f, 0x0c, 0x82, 0xe3, 0xa3, 0xfe, 0xe0, 0xf4, 0x9b, 0xd7, 0xe8, 0xf6, 0xc0, 0x32, 0xc3, + 0xaa, 0x65, 0x0d, 0xf5, 0x52, 0x99, 0x79, 0xcc, 0x45, 0xfd, 0x1e, 0x93, 0x5c, 0x98, 0x81, 0x6c, + 0xa2, 0xcf, 0xdd, 0x9f, 0x35, 0xd8, 0x2a, 0x0d, 0xbb, 0x8e, 0xc4, 0xfc, 0x82, 0x2e, 0x91, 0x14, + 0x2c, 0x2c, 0x04, 0x0b, 0x62, 0xba, 0x5c, 0x2a, 0x77, 0x6a, 0x5a, 0xe8, 0x57, 0x6b, 0x4d, 0x2e, + 0x39, 0xfe, 0xb1, 0x24, 0x5c, 0x18, 0x7c, 0xe9, 0x75, 0x78, 0x17, 0xe9, 0x7c, 0x00, 0xef, 0x21, + 0xa0, 0x2a, 0x98, 0x6d, 0x04, 0xdb, 0xa9, 0x0a, 0xe6, 0x56, 0x94, 0x09, 0x2d, 0xd3, 0xfa, 0x77, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x78, 0xc9, 0x75, 0x36, 0xb5, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto new file mode 100644 index 00000000000..3e9a1abbdb7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto @@ -0,0 +1,74 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+syntax = "proto3";
+
+import "google/protobuf/any.proto";
+import "testdata/test.proto";
+
+package proto3_proto;
+
+message Message {
+  enum Humour {
+    UNKNOWN = 0;
+    PUNS = 1;
+    SLAPSTICK = 2;
+    BILL_BAILEY = 3;
+  }
+
+  string name = 1;
+  Humour hilarity = 2;
+  uint32 height_in_cm = 3;
+  bytes data = 4;
+  int64 result_count = 7;
+  bool true_scotsman = 8;
+  float score = 9;
+
+  repeated uint64 key = 5;
+  Nested nested = 6;
+  repeated Humour r_funny = 16;
+
+  map<string, Nested> terrain = 10;
+  testdata.SubDefaults proto2_field = 11;
+  map<string, testdata.SubDefaults> proto2_value = 13;
+
+  google.protobuf.Any anything = 14;
+  repeated google.protobuf.Any many_things = 15;
+}
+
+message Nested {
+  string bunny = 1;
+  bool cute = 2;
+}
+
+message MessageWithMap {
+  map<bool, bytes> byte_mapping = 1;
+}
diff --git a/vendor/github.com/golang/protobuf/proto/proto3_test.go b/vendor/github.com/golang/protobuf/proto/proto3_test.go
new file mode 100644
index 00000000000..462f8055c30
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto3_test.go
@@ -0,0 +1,125 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/proto3_proto" + tpb "github.com/golang/protobuf/proto/testdata" +) + +func TestProto3ZeroValues(t *testing.T) { + tests := []struct { + desc string + m proto.Message + }{ + {"zero message", &pb.Message{}}, + {"empty bytes field", &pb.Message{Data: []byte{}}}, + } + for _, test := range tests { + b, err := proto.Marshal(test.m) + if err != nil { + t.Errorf("%s: proto.Marshal: %v", test.desc, err) + continue + } + if len(b) > 0 { + t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) + } + } +} + +func TestRoundTripProto3(t *testing.T) { + m := &pb.Message{ + Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" + Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 + HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 + Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" + ResultCount: 47, // (0 | 7<<3): 0x38 0x2f + TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 + Score: 8.1, // (5 | 9<<3): 0x4d <8.1> + + Key: []uint64{1, 0xdeadbeef}, + Nested: &pb.Nested{ + Bunny: "Monty", + }, + } + t.Logf(" m: %v", m) + + b, err := proto.Marshal(m) + if err != nil { + t.Fatalf("proto.Marshal: %v", err) + } + t.Logf(" b: %q", b) + + m2 := new(pb.Message) + if err := proto.Unmarshal(b, m2); err != nil { + t.Fatalf("proto.Unmarshal: %v", err) + } + t.Logf("m2: %v", m2) + + if !proto.Equal(m, m2) { + t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) + } +} + +func TestProto3SetDefaults(t *testing.T) { + in := &pb.Message{ + Terrain: map[string]*pb.Nested{ + "meadow": new(pb.Nested), + }, + Proto2Field: new(tpb.SubDefaults), + Proto2Value: map[string]*tpb.SubDefaults{ + "badlands": new(tpb.SubDefaults), + }, + } + + got := proto.Clone(in).(*pb.Message) + proto.SetDefaults(got) + + // There are no defaults in proto3. Everything should be the zero value, but + // we need to remember to set defaults for nested proto2 messages. + want := &pb.Message{ + Terrain: map[string]*pb.Nested{ + "meadow": new(pb.Nested), + }, + Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, + Proto2Value: map[string]*tpb.SubDefaults{ + "badlands": &tpb.SubDefaults{N: proto.Int64(7)}, + }, + } + + if !proto.Equal(got, want) { + t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/size2_test.go b/vendor/github.com/golang/protobuf/proto/size2_test.go new file mode 100644 index 00000000000..a2729c39a1b --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/size2_test.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "testing" +) + +// This is a separate file and package from size_test.go because that one uses +// generated messages and thus may not be in package proto without having a circular +// dependency, whereas this file tests unexported details of size.go. + +func TestVarintSize(t *testing.T) { + // Check the edge cases carefully. + testCases := []struct { + n uint64 + size int + }{ + {0, 1}, + {1, 1}, + {127, 1}, + {128, 2}, + {16383, 2}, + {16384, 3}, + {1<<63 - 1, 9}, + {1 << 63, 10}, + } + for _, tc := range testCases { + size := sizeVarint(tc.n) + if size != tc.size { + t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/size_test.go b/vendor/github.com/golang/protobuf/proto/size_test.go new file mode 100644 index 00000000000..af1034dc7b8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/size_test.go @@ -0,0 +1,164 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "log" + "strings" + "testing" + + . 
"github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} + +// messageWithExtension2 is in equal_test.go. +var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} + +func init() { + if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + + // Force messageWithExtension3 to have the extension encoded. + Marshal(messageWithExtension3) + +} + +var SizeTests = []struct { + desc string + pb Message +}{ + {"empty", &pb.OtherMessage{}}, + // Basic types. + {"bool", &pb.Defaults{F_Bool: Bool(true)}}, + {"int32", &pb.Defaults{F_Int32: Int32(12)}}, + {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, + {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, + {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, + {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, + {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, + {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, + {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, + {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, + {"float", &pb.Defaults{F_Float: Float32(12.6)}}, + {"double", &pb.Defaults{F_Double: Float64(13.9)}}, + {"string", &pb.Defaults{F_String: String("niles")}}, + {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, + {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, + {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, + {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, + {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, + // Repeated. + {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, + {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, + {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, + {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, + {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, + {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ + // Need enough large numbers to verify that the header is counting the number of bytes + // for the field, not the number of elements. + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + }}}, + {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, + {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, + // Nested. + {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, + {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, + // Other things. 
+ {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, + {"extension (unencoded)", messageWithExtension1}, + {"extension (encoded)", messageWithExtension3}, + // proto3 message + {"proto3 empty", &proto3pb.Message{}}, + {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, + {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, + {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, + {"proto3 float", &proto3pb.Message{Score: 12.6}}, + {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, + {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, + {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, + {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, + + {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, + {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, + {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, + {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, + + {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, + {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, + {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, + + {"oneof not set", &pb.Oneof{}}, + {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}}, + {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}}, + {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}}, + {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}}, + {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}}, + {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}}, + {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}}, + {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}}, + {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}}, + {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}}, + {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}}, + {"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}}, + {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}}, + {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}}, + {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}}, + {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}}, + {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}}, + {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}}, + {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}}, + {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}}, +} + +func TestSize(t *testing.T) { + for _, tc := range SizeTests { + size := Size(tc.pb) + b, err := Marshal(tc.pb) + if err != nil { + t.Errorf("%v: Marshal failed: %v", tc.desc, err) + continue + } + if size != len(b) { + t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) + t.Logf("%v: bytes: %#v", tc.desc, b) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/testdata/Makefile 
b/vendor/github.com/golang/protobuf/proto/testdata/Makefile new file mode 100644 index 00000000000..fc288628a75 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/Makefile @@ -0,0 +1,50 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +include ../../Make.protobuf + +all: regenerate + +regenerate: + rm -f test.pb.go + make test.pb.go + +# The following rules are just aids to development. Not needed for typical testing. + +diff: regenerate + git diff test.pb.go + +restore: + cp test.pb.go.golden test.pb.go + +preserve: + cp test.pb.go test.pb.go.golden diff --git a/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go b/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go new file mode 100644 index 00000000000..7172d0e9698 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go @@ -0,0 +1,86 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Verify that the compiler output for test.proto is unchanged. + +package testdata + +import ( + "crypto/sha1" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. +func sum(t *testing.T, name string) string { + data, err := ioutil.ReadFile(name) + if err != nil { + t.Fatal(err) + } + t.Logf("sum(%q): length is %d", name, len(data)) + hash := sha1.New() + _, err = hash.Write(data) + if err != nil { + t.Fatal(err) + } + return fmt.Sprintf("% x", hash.Sum(nil)) +} + +func run(t *testing.T, name string, args ...string) { + cmd := exec.Command(name, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + t.Fatal(err) + } +} + +func TestGolden(t *testing.T) { + // Compute the original checksum. + goldenSum := sum(t, "test.pb.go") + // Run the proto compiler. + run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") + newFile := filepath.Join(os.TempDir(), "test.pb.go") + defer os.Remove(newFile) + // Compute the new checksum. + newSum := sum(t, newFile) + // Verify + if newSum != goldenSum { + run(t, "diff", "-u", "test.pb.go", newFile) + t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") + } +} diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go b/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go new file mode 100644 index 00000000000..85b3bdf5f01 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go @@ -0,0 +1,4061 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package testdata is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + GoEnum + GoTestField + GoTest + GoTestRequiredGroupField + GoSkipTest + NonPackedTest + PackedTest + MaxTag + OldMessage + NewMessage + InnerMessage + OtherMessage + RequiredInnerMessage + MyMessage + Ext + ComplexExtension + DefaultsMessage + MyMessageSet + Empty + MessageList + Strings + Defaults + SubDefaults + RepeatedEnum + MoreRepeated + GroupOld + GroupNew + FloatingPoint + MessageWithMap + Oneof + Communique +*/ +package testdata + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} +func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// An enum, for completeness. +type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} +func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} +func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} } + +type DefaultsMessage_DefaultsEnum int32 + +const ( + DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 + DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 + DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 +) + +var DefaultsMessage_DefaultsEnum_name = map[int32]string{ + 0: "ZERO", + 1: "ONE", + 2: "TWO", +} +var DefaultsMessage_DefaultsEnum_value = map[string]int32{ + "ZERO": 0, + "ONE": 1, + "TWO": 2, +} + +func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { + p := 
new(DefaultsMessage_DefaultsEnum) + *p = x + return p +} +func (x DefaultsMessage_DefaultsEnum) String() string { + return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) +} +func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") + if err != nil { + return err + } + *x = DefaultsMessage_DefaultsEnum(value) + return nil +} +func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{16, 0} +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} +func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{21, 0} } + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} +func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{23, 0} } + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} +func (*GoEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req,name=Label,json=label" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req,name=Type,json=type" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} +func (*GoTestField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND 
`protobuf:"varint,1,req,name=Kind,json=kind,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt,name=Table,json=table" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt,name=Param,json=param" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. + RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField,json=repeatedField" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField,json=optionalField" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=fBoolRequired" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=fInt32Required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=fInt64Required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=fFixed32Required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=fFixed64Required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=fUint32Required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=fUint64Required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=fFloatRequired" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=fDoubleRequired" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=fStringRequired" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=fBytesRequired" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=fSint32Required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=fSint64Required" json:"F_Sint64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=fBoolRepeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=fInt32Repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=fInt64Repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=fFixed32Repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=fFixed64Repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=fUint32Repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=fUint64Repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=fFloatRepeated" json:"F_Float_repeated,omitempty"` + 
F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=fDoubleRepeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=fStringRepeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=fBytesRepeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=fSint32Repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=fSint64Repeated" json:"F_Sint64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=fBoolOptional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=fInt32Optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=fInt64Optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=fFixed32Optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=fFixed64Optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=fUint32Optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=fUint64Optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=fFloatOptional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=fDoubleOptional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=fStringOptional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=fBytesOptional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=fSint32Optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=fSint64Optional" json:"F_Sint64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=fBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=fInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=fInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=fFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=fFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=fUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=fUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 
`protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=fFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=fDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=fStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=fBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=fSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=fSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=fBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=fInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=fInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=fFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=fFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=fUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=fUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=fFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=fDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=fSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=fSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} +func (*GoTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const 
Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return 
m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted 
+ } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) +} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. 
+type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} +func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} +func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} +func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} } + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing a group containing a required field. +type GoTestRequiredGroupField struct { + Group *GoTestRequiredGroupField_Group `protobuf:"group,1,req,name=Group,json=group" json:"group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestRequiredGroupField) Reset() { *m = GoTestRequiredGroupField{} } +func (m *GoTestRequiredGroupField) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField) ProtoMessage() {} +func (*GoTestRequiredGroupField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GoTestRequiredGroupField) GetGroup() *GoTestRequiredGroupField_Group { + if m != nil { + return m.Group + } + return nil +} + +type GoTestRequiredGroupField_Group struct { + Field *int32 `protobuf:"varint,2,req,name=Field,json=field" json:"Field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestRequiredGroupField_Group) Reset() { *m = GoTestRequiredGroupField_Group{} } +func (m *GoTestRequiredGroupField_Group) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField_Group) ProtoMessage() {} +func (*GoTestRequiredGroupField_Group) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{3, 0} +} + +func (m *GoTestRequiredGroupField_Group) GetField() int32 { + if m != nil && m.Field != nil { + return *m.Field + } + return 0 +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. 
+type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} +func (*GoSkipTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} +func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m *GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. 
+type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} +func (*NonPackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} +func (*PackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. + LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} +func (*MaxTag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} +func (*OldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *OldMessage) GetNum() int32 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} +func (*OldMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} } + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + // This is an int32 in OldMessage. 
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} +func (*NewMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *NewMessage) GetNum() int64 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} +func (*NewMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} +func (*InnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} +func (*OtherMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +var extRange_OtherMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherMessage +} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight 
+ } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type RequiredInnerMessage struct { + LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} } +func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) } +func (*RequiredInnerMessage) ProtoMessage() {} +func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage { + if m != nil { + return m.LeoFinallyWonAnOscar + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. 
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} +func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage { + if m != nil { + return m.WeMustGoDeeper + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return *m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} +func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} } + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} +func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: "bytes,104,opt,name=text", +} + +var E_Ext_Number = 
&proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", +} + +type ComplexExtension struct { + First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"` + Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"` + Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ComplexExtension) Reset() { *m = ComplexExtension{} } +func (m *ComplexExtension) String() string { return proto.CompactTextString(m) } +func (*ComplexExtension) ProtoMessage() {} +func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *ComplexExtension) GetFirst() int32 { + if m != nil && m.First != nil { + return *m.First + } + return 0 +} + +func (m *ComplexExtension) GetSecond() int32 { + if m != nil && m.Second != nil { + return *m.Second + } + return 0 +} + +func (m *ComplexExtension) GetThird() []int32 { + if m != nil { + return m.Third + } + return nil +} + +type DefaultsMessage struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } +func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } +func (*DefaultsMessage) ProtoMessage() {} +func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +var extRange_DefaultsMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_DefaultsMessage +} + +type MyMessageSet struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} +func (*MyMessageSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *MyMessageSet) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(&m.XXX_InternalExtensions) +} +func (m *MyMessageSet) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) +} +func (m *MyMessageSet) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*MyMessageSet)(nil) +var _ proto.Unmarshaler = (*MyMessageSet)(nil) + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) 
ProtoMessage() {} +func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} +func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} +func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=fInt64,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,name=F_String,json=fString,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. 
+ F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=fPinf,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=fNinf,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=fNan,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + // Redundant but explicit defaults. + StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} +func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) 
+} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +func (m *Defaults) GetStrZero() string { + if m != nil && m.StrZero != nil { + return *m.StrZero + } + return "" +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} +func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} +func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} +func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) GetIntsPacked() 
[]int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} +func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} +func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} } + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} +func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} +func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 0} } + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} +func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +func (m *FloatingPoint) GetExact() bool { + if m != nil && m.Exact != nil { + return *m.Exact + } + return false +} + +type MessageWithMap struct { + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" 
protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *MessageWithMap) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func (m *MessageWithMap) GetStrToStr() map[string]string { + if m != nil { + return m.StrToStr + } + return nil +} + +type Oneof struct { + // Types that are valid to be assigned to Union: + // *Oneof_F_Bool + // *Oneof_F_Int32 + // *Oneof_F_Int64 + // *Oneof_F_Fixed32 + // *Oneof_F_Fixed64 + // *Oneof_F_Uint32 + // *Oneof_F_Uint64 + // *Oneof_F_Float + // *Oneof_F_Double + // *Oneof_F_String + // *Oneof_F_Bytes + // *Oneof_F_Sint32 + // *Oneof_F_Sint64 + // *Oneof_F_Enum + // *Oneof_F_Message + // *Oneof_FGroup + // *Oneof_F_Largest_Tag + Union isOneof_Union `protobuf_oneof:"union"` + // Types that are valid to be assigned to Tormato: + // *Oneof_Value + Tormato isOneof_Tormato `protobuf_oneof:"tormato"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Oneof) Reset() { *m = Oneof{} } +func (m *Oneof) String() string { return proto.CompactTextString(m) } +func (*Oneof) ProtoMessage() {} +func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +type isOneof_Union interface { + isOneof_Union() +} +type isOneof_Tormato interface { + isOneof_Tormato() +} + +type Oneof_F_Bool struct { + F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,oneof"` +} +type Oneof_F_Int32 struct { + F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,oneof"` +} +type Oneof_F_Int64 struct { + F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=fInt64,oneof"` +} +type Oneof_F_Fixed32 struct { + F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,oneof"` +} +type Oneof_F_Fixed64 struct { + F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,oneof"` +} +type Oneof_F_Uint32 struct { + F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,oneof"` +} +type Oneof_F_Uint64 struct { + F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,oneof"` +} +type Oneof_F_Float struct { + F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,oneof"` +} +type Oneof_F_Double struct { + F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,oneof"` +} +type Oneof_F_String struct { + F_String string `protobuf:"bytes,10,opt,name=F_String,json=fString,oneof"` +} +type Oneof_F_Bytes struct { + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,oneof"` +} +type Oneof_F_Sint32 struct { + F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,oneof"` 
+} +type Oneof_F_Sint64 struct { + F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,oneof"` +} +type Oneof_F_Enum struct { + F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.MyMessage_Color,oneof"` +} +type Oneof_F_Message struct { + F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=fMessage,oneof"` +} +type Oneof_FGroup struct { + FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"` +} +type Oneof_F_Largest_Tag struct { + F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=fLargestTag,oneof"` +} +type Oneof_Value struct { + Value int32 `protobuf:"varint,100,opt,name=value,oneof"` +} + +func (*Oneof_F_Bool) isOneof_Union() {} +func (*Oneof_F_Int32) isOneof_Union() {} +func (*Oneof_F_Int64) isOneof_Union() {} +func (*Oneof_F_Fixed32) isOneof_Union() {} +func (*Oneof_F_Fixed64) isOneof_Union() {} +func (*Oneof_F_Uint32) isOneof_Union() {} +func (*Oneof_F_Uint64) isOneof_Union() {} +func (*Oneof_F_Float) isOneof_Union() {} +func (*Oneof_F_Double) isOneof_Union() {} +func (*Oneof_F_String) isOneof_Union() {} +func (*Oneof_F_Bytes) isOneof_Union() {} +func (*Oneof_F_Sint32) isOneof_Union() {} +func (*Oneof_F_Sint64) isOneof_Union() {} +func (*Oneof_F_Enum) isOneof_Union() {} +func (*Oneof_F_Message) isOneof_Union() {} +func (*Oneof_FGroup) isOneof_Union() {} +func (*Oneof_F_Largest_Tag) isOneof_Union() {} +func (*Oneof_Value) isOneof_Tormato() {} + +func (m *Oneof) GetUnion() isOneof_Union { + if m != nil { + return m.Union + } + return nil +} +func (m *Oneof) GetTormato() isOneof_Tormato { + if m != nil { + return m.Tormato + } + return nil +} + +func (m *Oneof) GetF_Bool() bool { + if x, ok := m.GetUnion().(*Oneof_F_Bool); ok { + return x.F_Bool + } + return false +} + +func (m *Oneof) GetF_Int32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Int32); ok { + return x.F_Int32 + } + return 0 +} + +func (m *Oneof) GetF_Int64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Int64); ok { + return x.F_Int64 + } + return 0 +} + +func (m *Oneof) GetF_Fixed32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok { + return x.F_Fixed32 + } + return 0 +} + +func (m *Oneof) GetF_Fixed64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok { + return x.F_Fixed64 + } + return 0 +} + +func (m *Oneof) GetF_Uint32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok { + return x.F_Uint32 + } + return 0 +} + +func (m *Oneof) GetF_Uint64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok { + return x.F_Uint64 + } + return 0 +} + +func (m *Oneof) GetF_Float() float32 { + if x, ok := m.GetUnion().(*Oneof_F_Float); ok { + return x.F_Float + } + return 0 +} + +func (m *Oneof) GetF_Double() float64 { + if x, ok := m.GetUnion().(*Oneof_F_Double); ok { + return x.F_Double + } + return 0 +} + +func (m *Oneof) GetF_String() string { + if x, ok := m.GetUnion().(*Oneof_F_String); ok { + return x.F_String + } + return "" +} + +func (m *Oneof) GetF_Bytes() []byte { + if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok { + return x.F_Bytes + } + return nil +} + +func (m *Oneof) GetF_Sint32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok { + return x.F_Sint32 + } + return 0 +} + +func (m *Oneof) GetF_Sint64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok { + return x.F_Sint64 + } + return 0 +} + +func (m *Oneof) GetF_Enum() MyMessage_Color { + if x, ok := m.GetUnion().(*Oneof_F_Enum); ok { + return x.F_Enum + } + return MyMessage_RED +} + +func (m *Oneof) 
GetF_Message() *GoTestField { + if x, ok := m.GetUnion().(*Oneof_F_Message); ok { + return x.F_Message + } + return nil +} + +func (m *Oneof) GetFGroup() *Oneof_F_Group { + if x, ok := m.GetUnion().(*Oneof_FGroup); ok { + return x.FGroup + } + return nil +} + +func (m *Oneof) GetF_Largest_Tag() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok { + return x.F_Largest_Tag + } + return 0 +} + +func (m *Oneof) GetValue() int32 { + if x, ok := m.GetTormato().(*Oneof_Value); ok { + return x.Value + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{ + (*Oneof_F_Bool)(nil), + (*Oneof_F_Int32)(nil), + (*Oneof_F_Int64)(nil), + (*Oneof_F_Fixed32)(nil), + (*Oneof_F_Fixed64)(nil), + (*Oneof_F_Uint32)(nil), + (*Oneof_F_Uint64)(nil), + (*Oneof_F_Float)(nil), + (*Oneof_F_Double)(nil), + (*Oneof_F_String)(nil), + (*Oneof_F_Bytes)(nil), + (*Oneof_F_Sint32)(nil), + (*Oneof_F_Sint64)(nil), + (*Oneof_F_Enum)(nil), + (*Oneof_F_Message)(nil), + (*Oneof_FGroup)(nil), + (*Oneof_F_Largest_Tag)(nil), + (*Oneof_Value)(nil), + } +} + +func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + t := uint64(0) + if x.F_Bool { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Oneof_F_Int32: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + b.EncodeVarint(4<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(x.F_Fixed32)) + case *Oneof_F_Fixed64: + b.EncodeVarint(5<<3 | proto.WireFixed64) + b.EncodeFixed64(uint64(x.F_Fixed64)) + case *Oneof_F_Uint32: + b.EncodeVarint(6<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + b.EncodeVarint(7<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + b.EncodeVarint(8<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.F_Float))) + case *Oneof_F_Double: + b.EncodeVarint(9<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.F_Double)) + case *Oneof_F_String: + b.EncodeVarint(10<<3 | proto.WireBytes) + b.EncodeStringBytes(x.F_String) + case *Oneof_F_Bytes: + b.EncodeVarint(11<<3 | proto.WireBytes) + b.EncodeRawBytes(x.F_Bytes) + case *Oneof_F_Sint32: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.F_Sint32)) + case *Oneof_F_Sint64: + b.EncodeVarint(13<<3 | proto.WireVarint) + b.EncodeZigzag64(uint64(x.F_Sint64)) + case *Oneof_F_Enum: + b.EncodeVarint(14<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.F_Message); err != nil { + return err + } + case *Oneof_FGroup: + b.EncodeVarint(16<<3 | proto.WireStartGroup) + if err := b.Marshal(x.FGroup); err != nil { + return err + } + b.EncodeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + b.EncodeVarint(536870911<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + return fmt.Errorf("Oneof.Union has unexpected type %T", x) + } + // tormato + switch x := m.Tormato.(type) 
{ + case *Oneof_Value: + b.EncodeVarint(100<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Value)) + case nil: + default: + return fmt.Errorf("Oneof.Tormato has unexpected type %T", x) + } + return nil +} + +func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Oneof) + switch tag { + case 1: // union.F_Bool + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Bool{x != 0} + return true, err + case 2: // union.F_Int32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int32{int32(x)} + return true, err + case 3: // union.F_Int64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int64{int64(x)} + return true, err + case 4: // union.F_Fixed32 + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Fixed32{uint32(x)} + return true, err + case 5: // union.F_Fixed64 + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Fixed64{x} + return true, err + case 6: // union.F_Uint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint32{uint32(x)} + return true, err + case 7: // union.F_Uint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint64{x} + return true, err + case 8: // union.F_Float + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))} + return true, err + case 9: // union.F_Double + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Double{math.Float64frombits(x)} + return true, err + case 10: // union.F_String + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Oneof_F_String{x} + return true, err + case 11: // union.F_Bytes + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Oneof_F_Bytes{x} + return true, err + case 12: // union.F_Sint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Oneof_F_Sint32{int32(x)} + return true, err + case 13: // union.F_Sint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag64() + m.Union = &Oneof_F_Sint64{int64(x)} + return true, err + case 14: // union.F_Enum + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Enum{MyMessage_Color(x)} + return true, err + case 15: // union.F_Message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GoTestField) + err := b.DecodeMessage(msg) + m.Union = &Oneof_F_Message{msg} + return true, err + case 16: // union.f_group + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Oneof_F_Group) + err := b.DecodeGroup(msg) + m.Union = &Oneof_FGroup{msg} + return true, err + case 536870911: // 
union.F_Largest_Tag + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Largest_Tag{int32(x)} + return true, err + case 100: // tormato.value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Tormato = &Oneof_Value{int32(x)} + return true, err + default: + return false, nil + } +} + +func _Oneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 + case *Oneof_F_Int32: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + n += proto.SizeVarint(4<<3 | proto.WireFixed32) + n += 4 + case *Oneof_F_Fixed64: + n += proto.SizeVarint(5<<3 | proto.WireFixed64) + n += 8 + case *Oneof_F_Uint32: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + n += proto.SizeVarint(7<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + n += proto.SizeVarint(8<<3 | proto.WireFixed32) + n += 4 + case *Oneof_F_Double: + n += proto.SizeVarint(9<<3 | proto.WireFixed64) + n += 8 + case *Oneof_F_String: + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.F_String))) + n += len(x.F_String) + case *Oneof_F_Bytes: + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.F_Bytes))) + n += len(x.F_Bytes) + case *Oneof_F_Sint32: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31)))) + case *Oneof_F_Sint64: + n += proto.SizeVarint(13<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63)))) + case *Oneof_F_Enum: + n += proto.SizeVarint(14<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + s := proto.Size(x.F_Message) + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Oneof_FGroup: + n += proto.SizeVarint(16<<3 | proto.WireStartGroup) + n += proto.Size(x.FGroup) + n += proto.SizeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + n += proto.SizeVarint(536870911<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // tormato + switch x := m.Tormato.(type) { + case *Oneof_Value: + n += proto.SizeVarint(100<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Value)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oneof_F_Group struct { + X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } +func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } +func (*Oneof_F_Group) ProtoMessage() {} +func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29, 0} } + +func (m *Oneof_F_Group) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type Communique struct { + MakeMeCry *bool 
`protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Col + // *Communique_Msg + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} +func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Col struct { + Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"` +} +type Communique_Msg struct { + Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Col) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetCol() MyMessage_Color { + if x, ok := m.GetUnion().(*Communique_Col); ok { + return x.Col + } + return MyMessage_RED +} + +func (m *Communique) GetMsg() *Strings { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Col)(nil), + (*Communique_Msg)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Col: + b.EncodeVarint(9<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Col)) + case *Communique_Msg: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.col + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Col{MyMessage_Color(x)} + return true, err + case 10: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Strings) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 8 + case *Communique_Col: + n += proto.SizeVarint(9<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Col)) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += proto.SizeVarint(10<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", +} + +var E_Complex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: (*ComplexExtension)(nil), + Field: 200, + Name: "testdata.complex", + Tag: "bytes,200,opt,name=complex", +} + +var E_RComplex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: ([]*ComplexExtension)(nil), + Field: 201, + Name: "testdata.r_complex", + Tag: "bytes,201,rep,name=r_complex,json=rComplex", +} + +var E_NoDefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "testdata.no_default_double", + Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble", +} + +var E_NoDefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 102, + Name: "testdata.no_default_float", + Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat", +} + +var E_NoDefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 103, + Name: "testdata.no_default_int32", + Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32", +} + +var E_NoDefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 104, + Name: "testdata.no_default_int64", + Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64", +} + +var E_NoDefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 105, + Name: "testdata.no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32", +} + +var E_NoDefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 106, + Name: "testdata.no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64", +} + +var E_NoDefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 107, + Name: "testdata.no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32", +} + +var E_NoDefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 108, + Name: "testdata.no_default_sint64", + Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64", +} + +var E_NoDefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 109, + Name: "testdata.no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32", +} + +var E_NoDefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 110, + Name: "testdata.no_default_fixed64", + Tag: "fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64", +} + +var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 111, + Name: "testdata.no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32", +} + +var E_NoDefaultSfixed64 = 
&proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 112, + Name: "testdata.no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64", +} + +var E_NoDefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 113, + Name: "testdata.no_default_bool", + Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool", +} + +var E_NoDefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 114, + Name: "testdata.no_default_string", + Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString", +} + +var E_NoDefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 115, + Name: "testdata.no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes", +} + +var E_NoDefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 116, + Name: "testdata.no_default_enum", + Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum", +} + +var E_DefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 201, + Name: "testdata.default_double", + Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415", +} + +var E_DefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 202, + Name: "testdata.default_float", + Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14", +} + +var E_DefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 203, + Name: "testdata.default_int32", + Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42", +} + +var E_DefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 204, + Name: "testdata.default_int64", + Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43", +} + +var E_DefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 205, + Name: "testdata.default_uint32", + Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44", +} + +var E_DefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 206, + Name: "testdata.default_uint64", + Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45", +} + +var E_DefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 207, + Name: "testdata.default_sint32", + Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46", +} + +var E_DefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 208, + Name: "testdata.default_sint64", + Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47", +} + +var E_DefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 209, + Name: "testdata.default_fixed32", + Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48", +} + +var E_DefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + 
ExtensionType: (*uint64)(nil), + Field: 210, + Name: "testdata.default_fixed64", + Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49", +} + +var E_DefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 211, + Name: "testdata.default_sfixed32", + Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50", +} + +var E_DefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 212, + Name: "testdata.default_sfixed64", + Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51", +} + +var E_DefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 213, + Name: "testdata.default_bool", + Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1", +} + +var E_DefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 214, + Name: "testdata.default_string", + Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string", +} + +var E_DefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 215, + Name: "testdata.default_bytes", + Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes", +} + +var E_DefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 216, + Name: "testdata.default_enum", + Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "testdata.x201", + Tag: "bytes,201,opt,name=x201", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "testdata.x202", + Tag: "bytes,202,opt,name=x202", +} + +var E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 203, + Name: "testdata.x203", + Tag: "bytes,203,opt,name=x203", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "testdata.x204", + Tag: "bytes,204,opt,name=x204", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 205, + Name: "testdata.x205", + Tag: "bytes,205,opt,name=x205", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "testdata.x206", + Tag: "bytes,206,opt,name=x206", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "testdata.x207", + Tag: "bytes,207,opt,name=x207", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "testdata.x208", + Tag: "bytes,208,opt,name=x208", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "testdata.x209", + Tag: "bytes,209,opt,name=x209", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "testdata.x210", + Tag: "bytes,210,opt,name=x210", +} + +var E_X211 = 
&proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "testdata.x211", + Tag: "bytes,211,opt,name=x211", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "testdata.x212", + Tag: "bytes,212,opt,name=x212", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + Name: "testdata.x213", + Tag: "bytes,213,opt,name=x213", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "testdata.x214", + Tag: "bytes,214,opt,name=x214", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "testdata.x215", + Tag: "bytes,215,opt,name=x215", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 216, + Name: "testdata.x216", + Tag: "bytes,216,opt,name=x216", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "testdata.x217", + Tag: "bytes,217,opt,name=x217", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "testdata.x218", + Tag: "bytes,218,opt,name=x218", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "testdata.x219", + Tag: "bytes,219,opt,name=x219", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "testdata.x220", + Tag: "bytes,220,opt,name=x220", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "testdata.x221", + Tag: "bytes,221,opt,name=x221", +} + +var E_X222 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "testdata.x222", + Tag: "bytes,222,opt,name=x222", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 223, + Name: "testdata.x223", + Tag: "bytes,223,opt,name=x223", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: "testdata.x224", + Tag: "bytes,224,opt,name=x224", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "testdata.x225", + Tag: "bytes,225,opt,name=x225", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "testdata.x226", + Tag: "bytes,226,opt,name=x226", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "testdata.x227", + Tag: "bytes,227,opt,name=x227", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "testdata.x228", + Tag: "bytes,228,opt,name=x228", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 229, + Name: "testdata.x229", + Tag: "bytes,229,opt,name=x229", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: 
(*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "testdata.x230", + Tag: "bytes,230,opt,name=x230", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "testdata.x231", + Tag: "bytes,231,opt,name=x231", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "testdata.x232", + Tag: "bytes,232,opt,name=x232", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "testdata.x233", + Tag: "bytes,233,opt,name=x233", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "testdata.x234", + Tag: "bytes,234,opt,name=x234", +} + +var E_X235 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 235, + Name: "testdata.x235", + Tag: "bytes,235,opt,name=x235", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "testdata.x236", + Tag: "bytes,236,opt,name=x236", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "testdata.x237", + Tag: "bytes,237,opt,name=x237", +} + +var E_X238 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "testdata.x238", + Tag: "bytes,238,opt,name=x238", +} + +var E_X239 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "testdata.x239", + Tag: "bytes,239,opt,name=x239", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "testdata.x240", + Tag: "bytes,240,opt,name=x240", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 241, + Name: "testdata.x241", + Tag: "bytes,241,opt,name=x241", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "testdata.x242", + Tag: "bytes,242,opt,name=x242", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "testdata.x243", + Tag: "bytes,243,opt,name=x243", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "testdata.x244", + Tag: "bytes,244,opt,name=x244", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 245, + Name: "testdata.x245", + Tag: "bytes,245,opt,name=x245", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "testdata.x246", + Tag: "bytes,246,opt,name=x246", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "testdata.x247", + Tag: "bytes,247,opt,name=x247", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "testdata.x248", + Tag: "bytes,248,opt,name=x248", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + 
Field: 249, + Name: "testdata.x249", + Tag: "bytes,249,opt,name=x249", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "testdata.x250", + Tag: "bytes,250,opt,name=x250", +} + +func init() { + proto.RegisterType((*GoEnum)(nil), "testdata.GoEnum") + proto.RegisterType((*GoTestField)(nil), "testdata.GoTestField") + proto.RegisterType((*GoTest)(nil), "testdata.GoTest") + proto.RegisterType((*GoTest_RequiredGroup)(nil), "testdata.GoTest.RequiredGroup") + proto.RegisterType((*GoTest_RepeatedGroup)(nil), "testdata.GoTest.RepeatedGroup") + proto.RegisterType((*GoTest_OptionalGroup)(nil), "testdata.GoTest.OptionalGroup") + proto.RegisterType((*GoTestRequiredGroupField)(nil), "testdata.GoTestRequiredGroupField") + proto.RegisterType((*GoTestRequiredGroupField_Group)(nil), "testdata.GoTestRequiredGroupField.Group") + proto.RegisterType((*GoSkipTest)(nil), "testdata.GoSkipTest") + proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "testdata.GoSkipTest.SkipGroup") + proto.RegisterType((*NonPackedTest)(nil), "testdata.NonPackedTest") + proto.RegisterType((*PackedTest)(nil), "testdata.PackedTest") + proto.RegisterType((*MaxTag)(nil), "testdata.MaxTag") + proto.RegisterType((*OldMessage)(nil), "testdata.OldMessage") + proto.RegisterType((*OldMessage_Nested)(nil), "testdata.OldMessage.Nested") + proto.RegisterType((*NewMessage)(nil), "testdata.NewMessage") + proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested") + proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage") + proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage") + proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage") + proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage") + proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup") + proto.RegisterType((*Ext)(nil), "testdata.Ext") + proto.RegisterType((*ComplexExtension)(nil), "testdata.ComplexExtension") + proto.RegisterType((*DefaultsMessage)(nil), "testdata.DefaultsMessage") + proto.RegisterType((*MyMessageSet)(nil), "testdata.MyMessageSet") + proto.RegisterType((*Empty)(nil), "testdata.Empty") + proto.RegisterType((*MessageList)(nil), "testdata.MessageList") + proto.RegisterType((*MessageList_Message)(nil), "testdata.MessageList.Message") + proto.RegisterType((*Strings)(nil), "testdata.Strings") + proto.RegisterType((*Defaults)(nil), "testdata.Defaults") + proto.RegisterType((*SubDefaults)(nil), "testdata.SubDefaults") + proto.RegisterType((*RepeatedEnum)(nil), "testdata.RepeatedEnum") + proto.RegisterType((*MoreRepeated)(nil), "testdata.MoreRepeated") + proto.RegisterType((*GroupOld)(nil), "testdata.GroupOld") + proto.RegisterType((*GroupOld_G)(nil), "testdata.GroupOld.G") + proto.RegisterType((*GroupNew)(nil), "testdata.GroupNew") + proto.RegisterType((*GroupNew_G)(nil), "testdata.GroupNew.G") + proto.RegisterType((*FloatingPoint)(nil), "testdata.FloatingPoint") + proto.RegisterType((*MessageWithMap)(nil), "testdata.MessageWithMap") + proto.RegisterType((*Oneof)(nil), "testdata.Oneof") + proto.RegisterType((*Oneof_F_Group)(nil), "testdata.Oneof.F_Group") + proto.RegisterType((*Communique)(nil), "testdata.Communique") + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", 
DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) + proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_Complex) + proto.RegisterExtension(E_RComplex) + proto.RegisterExtension(E_NoDefaultDouble) + proto.RegisterExtension(E_NoDefaultFloat) + proto.RegisterExtension(E_NoDefaultInt32) + proto.RegisterExtension(E_NoDefaultInt64) + proto.RegisterExtension(E_NoDefaultUint32) + proto.RegisterExtension(E_NoDefaultUint64) + proto.RegisterExtension(E_NoDefaultSint32) + proto.RegisterExtension(E_NoDefaultSint64) + proto.RegisterExtension(E_NoDefaultFixed32) + proto.RegisterExtension(E_NoDefaultFixed64) + proto.RegisterExtension(E_NoDefaultSfixed32) + proto.RegisterExtension(E_NoDefaultSfixed64) + proto.RegisterExtension(E_NoDefaultBool) + proto.RegisterExtension(E_NoDefaultString) + proto.RegisterExtension(E_NoDefaultBytes) + proto.RegisterExtension(E_NoDefaultEnum) + proto.RegisterExtension(E_DefaultDouble) + proto.RegisterExtension(E_DefaultFloat) + proto.RegisterExtension(E_DefaultInt32) + proto.RegisterExtension(E_DefaultInt64) + proto.RegisterExtension(E_DefaultUint32) + proto.RegisterExtension(E_DefaultUint64) + proto.RegisterExtension(E_DefaultSint32) + proto.RegisterExtension(E_DefaultSint64) + proto.RegisterExtension(E_DefaultFixed32) + proto.RegisterExtension(E_DefaultFixed64) + proto.RegisterExtension(E_DefaultSfixed32) + proto.RegisterExtension(E_DefaultSfixed64) + proto.RegisterExtension(E_DefaultBool) + proto.RegisterExtension(E_DefaultString) + proto.RegisterExtension(E_DefaultBytes) + proto.RegisterExtension(E_DefaultEnum) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + 
proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} + +func init() { proto.RegisterFile("test.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 4465 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x5a, 0xc9, 0x77, 0xdb, 0x48, + 0x7a, 0x37, 0xc0, 0xfd, 0x23, 0x25, 0x42, 0x65, 0xb5, 0x9b, 0x96, 0xbc, 0xc0, 0x9c, 0xe9, 0x6e, + 0x7a, 0xd3, 0x48, 0x20, 0x44, 0xdb, 0x74, 0xa7, 0xdf, 0xf3, 0x42, 0xc9, 0x7a, 0x63, 0x89, 0x0a, + 0xa4, 0xee, 0x7e, 0xd3, 0x39, 0xf0, 0x51, 0x22, 0x48, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52, + 0x72, 0xe9, 0x4b, 0x72, 0xcd, 0x76, 0xc9, 0x35, 0xa7, 0x9c, 0x92, 0xbc, 0x97, 0x7f, 0x22, 0xe9, + 0xee, 0x59, 0x7b, 0xd6, 0xac, 0x93, 0x7d, 0x99, 0xec, 0xdb, 0x4c, 0x92, 0x4b, 0xcf, 0xab, 0xaf, + 0x0a, 0x40, 0x01, 0x24, 0x20, 0xf9, 0x24, 0x56, 0xd5, 0xef, 0xf7, 0xd5, 0xf6, 0xab, 0xef, 0xab, + 0xaf, 0x20, 0x00, 0xc7, 0x9c, 0x38, 0x2b, 0xa3, 0xb1, 0xed, 0xd8, 0x24, 0x4b, 0x7f, 0x77, 0xda, + 0x4e, 0xbb, 0x7c, 0x1d, 0xd2, 0x9b, 0x76, 0xc3, 0x3a, 0x1a, 0x92, 0xab, 0x90, 0xe8, 0xda, 0x76, + 0x49, 0x52, 0xe5, 0xca, 0xbc, 0x36, 0xb7, 0xe2, 0x22, 0x56, 0x36, 0x9a, 0x4d, 0x83, 0xb6, 0x94, + 0xef, 0x40, 0x7e, 0xd3, 0xde, 0x37, 0x27, 0xce, 0x46, 0xdf, 0x1c, 0x74, 0xc8, 0x22, 0xa4, 0x9e, + 0xb6, 0x0f, 0xcc, 0x01, 0x32, 0x72, 0x46, 0x6a, 0x40, 0x0b, 0x84, 0x40, 0x72, 0xff, 0x64, 0x64, + 0x96, 0x64, 0xac, 0x4c, 0x3a, 0x27, 0x23, 0xb3, 0xfc, 0x2b, 0x57, 0x68, 0x27, 0x94, 0x49, 0xae, + 0x43, 0xf2, 0xcb, 0x7d, 0xab, 0xc3, 0x7b, 0x79, 0xcd, 0xef, 0x85, 0xb5, 0xaf, 0x7c, 0x79, 0x6b, + 0xe7, 0xb1, 0x91, 0x7c, 0xde, 0xb7, 0xd0, 0xfe, 0x7e, 0xfb, 0x60, 0x40, 0x4d, 0x49, 0xd4, 0xbe, + 0x43, 0x0b, 0xb4, 0x76, 0xb7, 0x3d, 0x6e, 0x0f, 0x4b, 0x09, 0x55, 0xaa, 0xa4, 0x8c, 0xd4, 0x88, + 0x16, 0xc8, 0x7d, 0x98, 0x33, 0xcc, 0x17, 0x47, 0xfd, 0xb1, 0xd9, 0xc1, 0xc1, 0x95, 0x92, 0xaa, + 0x5c, 0xc9, 0x4f, 0xdb, 0xc7, 0x46, 0x63, 0x6e, 0x2c, 0x62, 0x19, 0x79, 0x64, 0xb6, 0x1d, 0x97, + 0x9c, 0x52, 0x13, 0xb1, 0x64, 0x01, 0x4b, 0xc9, 0xcd, 0x91, 0xd3, 0xb7, 0xad, 0xf6, 0x80, 0x91, + 0xd3, 0xaa, 0x14, 0x43, 0xb6, 0x45, 0x2c, 0x79, 0x13, 0x8a, 0x1b, 0xad, 0x87, 0xb6, 0x3d, 0x68, + 0xb9, 0x23, 0x2a, 0x81, 0x2a, 0x57, 0xb2, 0xc6, 0x5c, 0x97, 0xd6, 0xba, 0x53, 0x22, 0x15, 0x50, + 0x36, 0x5a, 0x5b, 0x96, 0x53, 0xd5, 0x7c, 0x60, 0x5e, 0x95, 0x2b, 0x29, 0x63, 0xbe, 0x8b, 0xd5, + 0x53, 0xc8, 0x9a, 0xee, 0x23, 0x0b, 0xaa, 0x5c, 0x49, 0x30, 0x64, 0x4d, 0xf7, 0x90, 0xb7, 0x80, + 0x6c, 0xb4, 0x36, 0xfa, 0xc7, 0x66, 0x47, 0xb4, 0x3a, 0xa7, 0xca, 0x95, 0x8c, 0xa1, 0x74, 0x79, + 0xc3, 0x0c, 0xb4, 0x68, 0x79, 0x5e, 0x95, 0x2b, 0x69, 0x17, 0x2d, 0xd8, 0xbe, 0x01, 0x0b, 0x1b, + 0xad, 0x77, 0xfb, 0xc1, 0x01, 0x17, 0x55, 0xb9, 0x32, 0x67, 0x14, 0xbb, 0xac, 0x7e, 0x1a, 0x2b, + 0x1a, 0x56, 0x54, 0xb9, 0x92, 0xe4, 0x58, 0xc1, 0x2e, 0xce, 0x6e, 0x63, 0x60, 0xb7, 0x1d, 0x1f, + 0xba, 0xa0, 0xca, 0x15, 0xd9, 0x98, 0xef, 0x62, 0x75, 0xd0, 0xea, 0x63, 0xfb, 0xe8, 0x60, 0x60, + 0xfa, 0x50, 0xa2, 0xca, 0x15, 0xc9, 0x28, 0x76, 0x59, 0x7d, 0x10, 0xbb, 0xe7, 0x8c, 0xfb, 0x56, + 0xcf, 0xc7, 0x9e, 0x47, 0xfd, 0x16, 0xbb, 0xac, 0x3e, 0x38, 0x82, 0x87, 0x27, 0x8e, 0x39, 0xf1, + 0xa1, 0xa6, 0x2a, 0x57, 0x0a, 0xc6, 0x7c, 0x17, 0xab, 0x43, 0x56, 0x43, 0x6b, 0xd0, 0x55, 0xe5, + 0xca, 0x02, 0xb5, 0x3a, 0x63, 0x0d, 0xf6, 0x42, 0x6b, 0xd0, 0x53, 0xe5, 0x0a, 0xe1, 0x58, 0x61, + 0x0d, 0x44, 0xcd, 0x30, 0x21, 0x96, 0x16, 0xd5, 0x84, 0xa0, 0x19, 0x56, 0x19, 0xd4, 0x0c, 0x07, + 0xbe, 0xa6, 0x26, 0x44, 0xcd, 0x84, 0x90, 0xd8, 0x39, 0x47, 0x5e, 0x50, 0x13, 0xa2, 0x66, 0x38, + 
0x32, 0xa4, 0x19, 0x8e, 0x7d, 0x5d, 0x4d, 0x04, 0x35, 0x33, 0x85, 0x16, 0x2d, 0x97, 0xd4, 0x44, + 0x50, 0x33, 0x1c, 0x1d, 0xd4, 0x0c, 0x07, 0x5f, 0x54, 0x13, 0x01, 0xcd, 0x84, 0xb1, 0xa2, 0xe1, + 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x38, 0x3b, 0x57, 0x33, 0x1c, 0xba, 0xac, 0x26, 0x44, 0xcd, 0x88, + 0x56, 0x3d, 0xcd, 0x70, 0xe8, 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x88, 0xf5, 0x34, 0xc3, 0xb1, 0x97, + 0xd5, 0x44, 0x40, 0x33, 0x1c, 0x7b, 0x5d, 0xd4, 0x0c, 0x87, 0x7e, 0x2c, 0xa9, 0x09, 0x51, 0x34, + 0x1c, 0x7a, 0x33, 0x20, 0x1a, 0x8e, 0xfd, 0x84, 0x62, 0x45, 0xd5, 0x84, 0xc1, 0xe2, 0x2a, 0x7c, + 0x4a, 0xc1, 0xa2, 0x6c, 0x38, 0xd8, 0x97, 0x8d, 0xeb, 0x82, 0x4a, 0x57, 0x54, 0xc9, 0x93, 0x8d, + 0xeb, 0xc3, 0x44, 0xd9, 0x78, 0xc0, 0xab, 0xe8, 0x6a, 0xb9, 0x6c, 0xa6, 0x90, 0x35, 0xdd, 0x47, + 0xaa, 0xaa, 0xe4, 0xcb, 0xc6, 0x43, 0x06, 0x64, 0xe3, 0x61, 0xaf, 0xa9, 0x92, 0x28, 0x9b, 0x19, + 0x68, 0xd1, 0x72, 0x59, 0x95, 0x44, 0xd9, 0x78, 0x68, 0x51, 0x36, 0x1e, 0xf8, 0x0b, 0xaa, 0x24, + 0xc8, 0x66, 0x1a, 0x2b, 0x1a, 0xfe, 0xa2, 0x2a, 0x09, 0xb2, 0x09, 0xce, 0x8e, 0xc9, 0xc6, 0x83, + 0xbe, 0xa1, 0x4a, 0xbe, 0x6c, 0x82, 0x56, 0xb9, 0x6c, 0x3c, 0xe8, 0x9b, 0xaa, 0x24, 0xc8, 0x26, + 0x88, 0xe5, 0xb2, 0xf1, 0xb0, 0x6f, 0x61, 0x7c, 0x73, 0x65, 0xe3, 0x61, 0x05, 0xd9, 0x78, 0xd0, + 0xdf, 0xa1, 0xb1, 0xd0, 0x93, 0x8d, 0x07, 0x15, 0x65, 0xe3, 0x61, 0x7f, 0x97, 0x62, 0x7d, 0xd9, + 0x4c, 0x83, 0xc5, 0x55, 0xf8, 0x3d, 0x0a, 0xf6, 0x65, 0xe3, 0x81, 0x57, 0x70, 0x10, 0x54, 0x36, + 0x1d, 0xb3, 0xdb, 0x3e, 0x1a, 0x50, 0x89, 0x55, 0xa8, 0x6e, 0xea, 0x49, 0x67, 0x7c, 0x64, 0xd2, + 0x91, 0xd8, 0xf6, 0xe0, 0xb1, 0xdb, 0x46, 0x56, 0xa8, 0x71, 0x26, 0x1f, 0x9f, 0x70, 0x9d, 0xea, + 0xa7, 0x2e, 0x57, 0x35, 0xa3, 0xc8, 0x34, 0x34, 0x8d, 0xaf, 0xe9, 0x02, 0xfe, 0x06, 0x55, 0x51, + 0x5d, 0xae, 0xe9, 0x0c, 0x5f, 0xd3, 0x7d, 0x7c, 0x15, 0xce, 0xfb, 0x52, 0xf2, 0x19, 0x37, 0xa9, + 0x96, 0xea, 0x89, 0xaa, 0xb6, 0x6a, 0x2c, 0xb8, 0x82, 0x9a, 0x45, 0x0a, 0x74, 0x73, 0x8b, 0x4a, + 0xaa, 0x9e, 0xa8, 0xe9, 0x1e, 0x49, 0xec, 0x49, 0xa3, 0x32, 0xe4, 0xc2, 0xf2, 0x39, 0xb7, 0xa9, + 0xb2, 0xea, 0xc9, 0xaa, 0xb6, 0xba, 0x6a, 0x28, 0x5c, 0x5f, 0x33, 0x38, 0x81, 0x7e, 0x56, 0xa8, + 0xc2, 0xea, 0xc9, 0x9a, 0xee, 0x71, 0x82, 0xfd, 0x2c, 0xb8, 0x42, 0xf3, 0x29, 0x5f, 0xa2, 0x4a, + 0xab, 0xa7, 0xab, 0x6b, 0xfa, 0xda, 0xfa, 0x3d, 0xa3, 0xc8, 0x14, 0xe7, 0x73, 0x74, 0xda, 0x0f, + 0x97, 0x9c, 0x4f, 0x5a, 0xa5, 0x9a, 0xab, 0xa7, 0xb5, 0x3b, 0x6b, 0x77, 0xb5, 0xbb, 0x86, 0xc2, + 0xb5, 0xe7, 0xb3, 0xde, 0xa1, 0x2c, 0x2e, 0x3e, 0x9f, 0xb5, 0x46, 0xd5, 0x57, 0x57, 0x9e, 0x99, + 0x83, 0x81, 0x7d, 0x4b, 0x2d, 0xbf, 0xb4, 0xc7, 0x83, 0xce, 0xb5, 0x32, 0x18, 0x0a, 0xd7, 0xa3, + 0xd8, 0xeb, 0x82, 0x2b, 0x48, 0x9f, 0xfe, 0x6b, 0xf4, 0x1e, 0x56, 0xa8, 0x67, 0x1e, 0xf6, 0x7b, + 0x96, 0x3d, 0x31, 0x8d, 0x22, 0x93, 0x66, 0x68, 0x4d, 0xf6, 0xc2, 0xeb, 0xf8, 0xeb, 0x94, 0xb6, + 0x50, 0x4f, 0xdc, 0xae, 0x6a, 0xb4, 0xa7, 0x59, 0xeb, 0xb8, 0x17, 0x5e, 0xc7, 0xdf, 0xa0, 0x1c, + 0x52, 0x4f, 0xdc, 0xae, 0xe9, 0x9c, 0x23, 0xae, 0xe3, 0x1d, 0xb8, 0x10, 0x8a, 0x8b, 0xad, 0x51, + 0xfb, 0xf0, 0xb9, 0xd9, 0x29, 0x69, 0x34, 0x3c, 0x3e, 0x94, 0x15, 0xc9, 0x38, 0x1f, 0x08, 0x91, + 0xbb, 0xd8, 0x4c, 0xee, 0xc1, 0xeb, 0xe1, 0x40, 0xe9, 0x32, 0xab, 0x34, 0x5e, 0x22, 0x73, 0x31, + 0x18, 0x33, 0x43, 0x54, 0xc1, 0x01, 0xbb, 0x54, 0x9d, 0x06, 0x50, 0x9f, 0xea, 0x7b, 0x62, 0x4e, + 0xfd, 0x19, 0xb8, 0x38, 0x1d, 0x4a, 0x5d, 0xf2, 0x3a, 0x8d, 0xa8, 0x48, 0xbe, 0x10, 0x8e, 0xaa, + 0x53, 0xf4, 0x19, 0x7d, 0xd7, 0x68, 0x88, 0x15, 0xe9, 0x53, 0xbd, 0xdf, 0x87, 0xd2, 0x54, 0xb0, + 0x75, 0xd9, 0x77, 0x68, 
0xcc, 0x45, 0xf6, 0x6b, 0xa1, 0xb8, 0x1b, 0x26, 0xcf, 0xe8, 0xfa, 0x2e, + 0x0d, 0xc2, 0x02, 0x79, 0xaa, 0x67, 0x5c, 0xb2, 0x60, 0x38, 0x76, 0xb9, 0xf7, 0x68, 0x54, 0xe6, + 0x4b, 0x16, 0x88, 0xcc, 0x62, 0xbf, 0xa1, 0xf8, 0xec, 0x72, 0xeb, 0x34, 0x4c, 0xf3, 0x7e, 0x83, + 0xa1, 0x9a, 0x93, 0xdf, 0xa6, 0xe4, 0xbd, 0xd9, 0x33, 0xfe, 0x71, 0x82, 0x06, 0x58, 0xce, 0xde, + 0x9b, 0x35, 0x65, 0x8f, 0x3d, 0x63, 0xca, 0x3f, 0xa1, 0x6c, 0x22, 0xb0, 0xa7, 0xe6, 0xfc, 0x18, + 0xbc, 0x8c, 0xa3, 0x37, 0xb6, 0x8f, 0x46, 0xa5, 0x0d, 0x55, 0xae, 0x80, 0x76, 0x65, 0x2a, 0xfb, + 0x71, 0x2f, 0x79, 0x9b, 0x14, 0x65, 0x04, 0x49, 0xcc, 0x0a, 0xb3, 0xcb, 0xac, 0xec, 0xaa, 0x89, + 0x08, 0x2b, 0x0c, 0xe5, 0x59, 0x11, 0x48, 0xd4, 0x8a, 0xeb, 0xf4, 0x99, 0x95, 0x0f, 0x54, 0x69, + 0xa6, 0x15, 0x37, 0x04, 0x70, 0x2b, 0x01, 0xd2, 0xd2, 0xba, 0x9f, 0x6f, 0x61, 0x3b, 0xf9, 0x62, + 0x38, 0x01, 0xdb, 0xc4, 0xfb, 0x73, 0x30, 0xd3, 0x62, 0x34, 0x61, 0x70, 0xd3, 0xb4, 0x9f, 0x8d, + 0xa0, 0x05, 0x46, 0x33, 0x4d, 0xfb, 0xb9, 0x19, 0xb4, 0xf2, 0x6f, 0x4a, 0x90, 0xa4, 0xf9, 0x24, + 0xc9, 0x42, 0xf2, 0xbd, 0xe6, 0xd6, 0x63, 0xe5, 0x1c, 0xfd, 0xf5, 0xb0, 0xd9, 0x7c, 0xaa, 0x48, + 0x24, 0x07, 0xa9, 0x87, 0x5f, 0xd9, 0x6f, 0xec, 0x29, 0x32, 0x29, 0x42, 0x7e, 0x63, 0x6b, 0x67, + 0xb3, 0x61, 0xec, 0x1a, 0x5b, 0x3b, 0xfb, 0x4a, 0x82, 0xb6, 0x6d, 0x3c, 0x6d, 0x3e, 0xd8, 0x57, + 0x92, 0x24, 0x03, 0x09, 0x5a, 0x97, 0x22, 0x00, 0xe9, 0xbd, 0x7d, 0x63, 0x6b, 0x67, 0x53, 0x49, + 0x53, 0x2b, 0xfb, 0x5b, 0xdb, 0x0d, 0x25, 0x43, 0x91, 0xfb, 0xef, 0xee, 0x3e, 0x6d, 0x28, 0x59, + 0xfa, 0xf3, 0x81, 0x61, 0x3c, 0xf8, 0x8a, 0x92, 0xa3, 0xa4, 0xed, 0x07, 0xbb, 0x0a, 0x60, 0xf3, + 0x83, 0x87, 0x4f, 0x1b, 0x4a, 0x9e, 0x14, 0x20, 0xbb, 0xf1, 0xee, 0xce, 0xa3, 0xfd, 0xad, 0xe6, + 0x8e, 0x52, 0x28, 0x9f, 0x40, 0x89, 0x2d, 0x73, 0x60, 0x15, 0x59, 0x52, 0xf8, 0x0e, 0xa4, 0xd8, + 0xce, 0x48, 0xa8, 0x92, 0x4a, 0x78, 0x67, 0xa6, 0x29, 0x2b, 0x6c, 0x8f, 0x18, 0x6d, 0xe9, 0x32, + 0xa4, 0xd8, 0x2a, 0x2d, 0x42, 0x8a, 0xad, 0x8e, 0x8c, 0xa9, 0x62, 0xaa, 0x8b, 0xab, 0xf2, 0x5b, + 0x32, 0xc0, 0xa6, 0xbd, 0xf7, 0xbc, 0x3f, 0xc2, 0x84, 0xfc, 0x32, 0xc0, 0xe4, 0x79, 0x7f, 0xd4, + 0x42, 0xd5, 0xf3, 0xa4, 0x32, 0x47, 0x6b, 0xd0, 0xdf, 0x91, 0x6b, 0x50, 0xc0, 0xe6, 0x2e, 0xf3, + 0x42, 0x98, 0x4b, 0x66, 0x8c, 0x3c, 0xad, 0xe3, 0x8e, 0x29, 0x08, 0xa9, 0xe9, 0x98, 0x42, 0xa6, + 0x05, 0x48, 0x4d, 0x27, 0x57, 0x01, 0x8b, 0xad, 0x09, 0x46, 0x14, 0x4c, 0x1b, 0x73, 0x06, 0xf6, + 0xcb, 0x62, 0x0c, 0x79, 0x1b, 0xb0, 0x4f, 0x36, 0xef, 0xe2, 0xf4, 0xe9, 0x70, 0x87, 0xbb, 0x42, + 0x7f, 0xb0, 0xd9, 0xfa, 0x84, 0xa5, 0x26, 0xe4, 0xbc, 0x7a, 0xda, 0x17, 0xd6, 0xf2, 0x19, 0x29, + 0x38, 0x23, 0xc0, 0x2a, 0x6f, 0x4a, 0x0c, 0xc0, 0x47, 0xb3, 0x80, 0xa3, 0x61, 0x24, 0x36, 0x9c, + 0xf2, 0x65, 0x98, 0xdb, 0xb1, 0x2d, 0x76, 0x7a, 0x71, 0x95, 0x0a, 0x20, 0xb5, 0x4b, 0x12, 0x66, + 0x4f, 0x52, 0xbb, 0x7c, 0x05, 0x40, 0x68, 0x53, 0x40, 0x3a, 0x60, 0x6d, 0xe8, 0x03, 0xa4, 0x83, + 0xf2, 0x4d, 0x48, 0x6f, 0xb7, 0x8f, 0xf7, 0xdb, 0x3d, 0x72, 0x0d, 0x60, 0xd0, 0x9e, 0x38, 0x2d, + 0x5c, 0xfa, 0xd2, 0xe7, 0x9f, 0x7f, 0xfe, 0xb9, 0x84, 0x97, 0xbd, 0x1c, 0xad, 0x65, 0x2a, 0x7d, + 0x01, 0xd0, 0x1c, 0x74, 0xb6, 0xcd, 0xc9, 0xa4, 0xdd, 0x33, 0x49, 0x15, 0xd2, 0x96, 0x39, 0xa1, + 0xd1, 0x4e, 0xc2, 0x77, 0x84, 0x65, 0x7f, 0x15, 0x7c, 0xd4, 0xca, 0x0e, 0x42, 0x0c, 0x0e, 0x25, + 0x0a, 0x24, 0xac, 0xa3, 0x21, 0xbe, 0x93, 0xa4, 0x0c, 0xfa, 0x73, 0xe9, 0x12, 0xa4, 0x19, 0x86, + 0x10, 0x48, 0x5a, 0xed, 0xa1, 0x59, 0x62, 0xfd, 0xe2, 0xef, 0xf2, 0xaf, 0x4a, 0x00, 0x3b, 0xe6, + 0xcb, 0x33, 0xf4, 0xe9, 0xa3, 0x62, 0xfa, 0x4c, 
0xb0, 0x3e, 0xef, 0xc7, 0xf5, 0x49, 0x75, 0xd6, + 0xb5, 0xed, 0x4e, 0x8b, 0x6d, 0x31, 0x7b, 0xd2, 0xc9, 0xd1, 0x1a, 0xdc, 0xb5, 0xf2, 0x07, 0x50, + 0xd8, 0xb2, 0x2c, 0x73, 0xec, 0x8e, 0x89, 0x40, 0xf2, 0x99, 0x3d, 0x71, 0xf8, 0xdb, 0x12, 0xfe, + 0x26, 0x25, 0x48, 0x8e, 0xec, 0xb1, 0xc3, 0xe6, 0x59, 0x4f, 0xea, 0xab, 0xab, 0xab, 0x06, 0xd6, + 0x90, 0x4b, 0x90, 0x3b, 0xb4, 0x2d, 0xcb, 0x3c, 0xa4, 0x93, 0x48, 0x60, 0x5a, 0xe3, 0x57, 0x94, + 0x7f, 0x59, 0x82, 0x42, 0xd3, 0x79, 0xe6, 0x1b, 0x57, 0x20, 0xf1, 0xdc, 0x3c, 0xc1, 0xe1, 0x25, + 0x0c, 0xfa, 0x93, 0x1e, 0x95, 0x9f, 0x6f, 0x0f, 0x8e, 0xd8, 0x5b, 0x53, 0xc1, 0x60, 0x05, 0x72, + 0x01, 0xd2, 0x2f, 0xcd, 0x7e, 0xef, 0x99, 0x83, 0x36, 0x65, 0x83, 0x97, 0xc8, 0x2d, 0x48, 0xf5, + 0xe9, 0x60, 0x4b, 0x49, 0x5c, 0xaf, 0x0b, 0xfe, 0x7a, 0x89, 0x73, 0x30, 0x18, 0xe8, 0x46, 0x36, + 0xdb, 0x51, 0x3e, 0xfa, 0xe8, 0xa3, 0x8f, 0xe4, 0x72, 0x17, 0x16, 0xdd, 0xc3, 0x1b, 0x98, 0xec, + 0x0e, 0x94, 0x06, 0xa6, 0xdd, 0xea, 0xf6, 0xad, 0xf6, 0x60, 0x70, 0xd2, 0x7a, 0x69, 0x5b, 0xad, + 0xb6, 0xd5, 0xb2, 0x27, 0x87, 0xed, 0x31, 0x2e, 0x40, 0x74, 0x17, 0x8b, 0x03, 0xd3, 0xde, 0x60, + 0xb4, 0xf7, 0x6d, 0xeb, 0x81, 0xd5, 0xa4, 0x9c, 0xf2, 0x1f, 0x24, 0x21, 0xb7, 0x7d, 0xe2, 0x5a, + 0x5f, 0x84, 0xd4, 0xa1, 0x7d, 0x64, 0xb1, 0xb5, 0x4c, 0x19, 0xac, 0xe0, 0xed, 0x91, 0x2c, 0xec, + 0xd1, 0x22, 0xa4, 0x5e, 0x1c, 0xd9, 0x8e, 0x89, 0xd3, 0xcd, 0x19, 0xac, 0x40, 0x57, 0x6b, 0x64, + 0x3a, 0xa5, 0x24, 0x26, 0xb7, 0xf4, 0xa7, 0x3f, 0xff, 0xd4, 0x19, 0xe6, 0x4f, 0x56, 0x20, 0x6d, + 0xd3, 0xd5, 0x9f, 0x94, 0xd2, 0xf8, 0xae, 0x26, 0xc0, 0xc5, 0x5d, 0x31, 0x38, 0x8a, 0x6c, 0xc1, + 0xc2, 0x4b, 0xb3, 0x35, 0x3c, 0x9a, 0x38, 0xad, 0x9e, 0xdd, 0xea, 0x98, 0xe6, 0xc8, 0x1c, 0x97, + 0xe6, 0xb0, 0x27, 0xc1, 0x27, 0xcc, 0x5a, 0x48, 0x63, 0xfe, 0xa5, 0xb9, 0x7d, 0x34, 0x71, 0x36, + 0xed, 0xc7, 0xc8, 0x22, 0x55, 0xc8, 0x8d, 0x4d, 0xea, 0x09, 0xe8, 0x60, 0x0b, 0xe1, 0xde, 0x03, + 0xd4, 0xec, 0xd8, 0x1c, 0x61, 0x05, 0x59, 0x87, 0xec, 0x41, 0xff, 0xb9, 0x39, 0x79, 0x66, 0x76, + 0x4a, 0x19, 0x55, 0xaa, 0xcc, 0x6b, 0x17, 0x7d, 0x8e, 0xb7, 0xac, 0x2b, 0x8f, 0xec, 0x81, 0x3d, + 0x36, 0x3c, 0x28, 0xb9, 0x0f, 0xb9, 0x89, 0x3d, 0x34, 0x99, 0xbe, 0xb3, 0x18, 0x54, 0x2f, 0xcf, + 0xe2, 0xed, 0xd9, 0x43, 0xd3, 0xf5, 0x60, 0x2e, 0x9e, 0x2c, 0xb3, 0x81, 0x1e, 0xd0, 0xab, 0x73, + 0x09, 0xf0, 0x69, 0x80, 0x0e, 0x08, 0xaf, 0xd2, 0x64, 0x89, 0x0e, 0xa8, 0xd7, 0xa5, 0x37, 0xa2, + 0x52, 0x1e, 0xf3, 0x4a, 0xaf, 0xbc, 0x74, 0x0b, 0x72, 0x9e, 0x41, 0xdf, 0xf5, 0x31, 0x77, 0x93, + 0x43, 0x7f, 0xc0, 0x5c, 0x1f, 0xf3, 0x35, 0x6f, 0x40, 0x0a, 0x87, 0x4d, 0x23, 0x94, 0xd1, 0xa0, + 0x01, 0x31, 0x07, 0xa9, 0x4d, 0xa3, 0xd1, 0xd8, 0x51, 0x24, 0x8c, 0x8d, 0x4f, 0xdf, 0x6d, 0x28, + 0xb2, 0xa0, 0xd8, 0xdf, 0x96, 0x20, 0xd1, 0x38, 0x46, 0xb5, 0xd0, 0x69, 0xb8, 0x27, 0x9a, 0xfe, + 0xd6, 0x6a, 0x90, 0x1c, 0xda, 0x63, 0x93, 0x9c, 0x9f, 0x31, 0xcb, 0x52, 0x0f, 0xf7, 0x4b, 0x78, + 0x45, 0x6e, 0x1c, 0x3b, 0x06, 0xe2, 0xb5, 0xb7, 0x20, 0xe9, 0x98, 0xc7, 0xce, 0x6c, 0xde, 0x33, + 0xd6, 0x01, 0x05, 0x68, 0x37, 0x21, 0x6d, 0x1d, 0x0d, 0x0f, 0xcc, 0xf1, 0x6c, 0x68, 0x1f, 0xa7, + 0xc7, 0x21, 0xe5, 0xf7, 0x40, 0x79, 0x64, 0x0f, 0x47, 0x03, 0xf3, 0xb8, 0x71, 0xec, 0x98, 0xd6, + 0xa4, 0x6f, 0x5b, 0x54, 0xcf, 0xdd, 0xfe, 0x18, 0xbd, 0x88, 0xc4, 0x02, 0xe0, 0x78, 0xe2, 0xd0, + 0x53, 0x3d, 0x31, 0x0f, 0x6d, 0xab, 0xc3, 0x1d, 0x26, 0x2f, 0x51, 0xb4, 0xf3, 0xac, 0x3f, 0xa6, + 0x0e, 0x84, 0xfa, 0x79, 0x56, 0x28, 0x6f, 0x42, 0x91, 0xe7, 0x18, 0x13, 0xde, 0x71, 0xf9, 0x06, + 0x14, 0xdc, 0x2a, 0x7c, 0x38, 0xcf, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x54, 
0xce, 0xd1, 0x65, 0x6d, + 0xee, 0x34, 0x14, 0x89, 0xfe, 0xd8, 0x7f, 0xbf, 0x19, 0x58, 0xca, 0x4b, 0x50, 0xf0, 0xc6, 0xbe, + 0x67, 0x3a, 0xd8, 0x42, 0x03, 0x42, 0xa6, 0x2e, 0x67, 0xa5, 0x72, 0x06, 0x52, 0x8d, 0xe1, 0xc8, + 0x39, 0x29, 0xff, 0x22, 0xe4, 0x39, 0xe8, 0x69, 0x7f, 0xe2, 0x90, 0x3b, 0x90, 0x19, 0xf2, 0xf9, + 0x4a, 0x78, 0xdd, 0x13, 0x35, 0xe5, 0xe3, 0xdc, 0xdf, 0x86, 0x8b, 0x5e, 0xaa, 0x42, 0x46, 0xf0, + 0xa5, 0xfc, 0xa8, 0xcb, 0xe2, 0x51, 0x67, 0x4e, 0x21, 0x21, 0x38, 0x85, 0xf2, 0x36, 0x64, 0x58, + 0x04, 0x9c, 0x60, 0x54, 0x67, 0xa9, 0x22, 0x13, 0x13, 0xdb, 0xf9, 0x3c, 0xab, 0x63, 0x17, 0x95, + 0xab, 0x90, 0x47, 0xc1, 0x72, 0x04, 0x73, 0x9d, 0x80, 0x55, 0x4c, 0x6e, 0xbf, 0x9f, 0x82, 0xac, + 0xbb, 0x52, 0x64, 0x19, 0xd2, 0x2c, 0x3f, 0x43, 0x53, 0xee, 0xfb, 0x41, 0x0a, 0x33, 0x32, 0xb2, + 0x0c, 0x19, 0x9e, 0x83, 0x71, 0xef, 0x2e, 0x57, 0x35, 0x23, 0xcd, 0x72, 0x2e, 0xaf, 0xb1, 0xa6, + 0xa3, 0x63, 0x62, 0x2f, 0x03, 0x69, 0x96, 0x55, 0x11, 0x15, 0x72, 0x5e, 0x1e, 0x85, 0xfe, 0x98, + 0x3f, 0x03, 0x64, 0xdd, 0xc4, 0x49, 0x40, 0xd4, 0x74, 0xf4, 0x58, 0x3c, 0xe7, 0xcf, 0x76, 0xfd, + 0xeb, 0x49, 0xd6, 0xcd, 0x86, 0xf0, 0xf9, 0xde, 0x4d, 0xf0, 0x33, 0x3c, 0xff, 0xf1, 0x01, 0x35, + 0x1d, 0x5d, 0x82, 0x9b, 0xcd, 0x67, 0x78, 0x8e, 0x43, 0xae, 0xd2, 0x21, 0x62, 0xce, 0x82, 0x47, + 0xdf, 0x4f, 0xdd, 0xd3, 0x2c, 0x93, 0x21, 0xd7, 0xa8, 0x05, 0x96, 0x98, 0xe0, 0xb9, 0xf4, 0xf3, + 0xf4, 0x0c, 0xcf, 0x57, 0xc8, 0x4d, 0x0a, 0x61, 0xcb, 0x5f, 0x82, 0x88, 0xa4, 0x3c, 0xc3, 0x93, + 0x72, 0xa2, 0xd2, 0x0e, 0xd1, 0x3d, 0xa0, 0x4b, 0x10, 0x12, 0xf0, 0x34, 0x4b, 0xc0, 0xc9, 0x15, + 0x34, 0xc7, 0x26, 0x55, 0xf0, 0x93, 0xed, 0x0c, 0x4f, 0x70, 0xfc, 0x76, 0xbc, 0xb2, 0x79, 0x89, + 0x75, 0x86, 0xa7, 0x30, 0xa4, 0x46, 0xf7, 0x8b, 0xea, 0xbb, 0x34, 0x8f, 0x4e, 0xb0, 0xe4, 0x0b, + 0xcf, 0xdd, 0x53, 0xe6, 0x03, 0xeb, 0xcc, 0x83, 0x18, 0xa9, 0x2e, 0x9e, 0x86, 0x25, 0xca, 0xdb, + 0xed, 0x5b, 0xdd, 0x52, 0x11, 0x57, 0x22, 0xd1, 0xb7, 0xba, 0x46, 0xaa, 0x4b, 0x6b, 0x98, 0x06, + 0x76, 0x68, 0x9b, 0x82, 0x6d, 0xc9, 0xdb, 0xac, 0x91, 0x56, 0x91, 0x12, 0xa4, 0x36, 0x5a, 0x3b, + 0x6d, 0xab, 0xb4, 0xc0, 0x78, 0x56, 0xdb, 0x32, 0x92, 0xdd, 0x9d, 0xb6, 0x45, 0xde, 0x82, 0xc4, + 0xe4, 0xe8, 0xa0, 0x44, 0xc2, 0x5f, 0x56, 0xf6, 0x8e, 0x0e, 0xdc, 0xa1, 0x18, 0x14, 0x41, 0x96, + 0x21, 0x3b, 0x71, 0xc6, 0xad, 0x5f, 0x30, 0xc7, 0x76, 0xe9, 0x3c, 0x2e, 0xe1, 0x39, 0x23, 0x33, + 0x71, 0xc6, 0x1f, 0x98, 0x63, 0xfb, 0x8c, 0xce, 0xaf, 0x7c, 0x05, 0xf2, 0x82, 0x5d, 0x52, 0x04, + 0xc9, 0x62, 0x37, 0x85, 0xba, 0x74, 0xc7, 0x90, 0xac, 0xf2, 0x3e, 0x14, 0xdc, 0x1c, 0x06, 0xe7, + 0xab, 0xd1, 0x93, 0x34, 0xb0, 0xc7, 0x78, 0x3e, 0xe7, 0xb5, 0x4b, 0x62, 0x88, 0xf2, 0x61, 0x3c, + 0x5c, 0x30, 0x68, 0x59, 0x09, 0x0d, 0x45, 0x2a, 0xff, 0x50, 0x82, 0xc2, 0xb6, 0x3d, 0xf6, 0x1f, + 0x98, 0x17, 0x21, 0x75, 0x60, 0xdb, 0x83, 0x09, 0x9a, 0xcd, 0x1a, 0xac, 0x40, 0xde, 0x80, 0x02, + 0xfe, 0x70, 0x73, 0x4f, 0xd9, 0x7b, 0xda, 0xc8, 0x63, 0x3d, 0x4f, 0x38, 0x09, 0x24, 0xfb, 0x96, + 0x33, 0xe1, 0x9e, 0x0c, 0x7f, 0x93, 0x2f, 0x40, 0x9e, 0xfe, 0x75, 0x99, 0x49, 0xef, 0xc2, 0x0a, + 0xb4, 0x9a, 0x13, 0xdf, 0x82, 0x39, 0xdc, 0x7d, 0x0f, 0x96, 0xf1, 0x9e, 0x31, 0x0a, 0xac, 0x81, + 0x03, 0x4b, 0x90, 0x61, 0xae, 0x60, 0x82, 0x5f, 0xcb, 0x72, 0x86, 0x5b, 0xa4, 0xee, 0x15, 0x33, + 0x01, 0x16, 0xee, 0x33, 0x06, 0x2f, 0x95, 0x1f, 0x40, 0x16, 0xa3, 0x54, 0x73, 0xd0, 0x21, 0x65, + 0x90, 0x7a, 0x25, 0x13, 0x63, 0xe4, 0xa2, 0x70, 0xcd, 0xe7, 0xcd, 0x2b, 0x9b, 0x86, 0xd4, 0x5b, + 0x5a, 0x00, 0x69, 0x93, 0xde, 0xbb, 0x8f, 0xb9, 0x9b, 0x96, 0x8e, 0xcb, 0x4d, 0x6e, 0x62, 0xc7, + 
0x7c, 0x19, 0x67, 0x62, 0xc7, 0x7c, 0xc9, 0x4c, 0x5c, 0x9d, 0x32, 0x41, 0x4b, 0x27, 0xfc, 0xd3, + 0xa1, 0x74, 0x52, 0xae, 0xc2, 0x1c, 0x1e, 0xcf, 0xbe, 0xd5, 0xdb, 0xb5, 0xfb, 0x16, 0xde, 0xf3, + 0xbb, 0x78, 0x4f, 0x92, 0x0c, 0xa9, 0x4b, 0xf7, 0xc0, 0x3c, 0x6e, 0x1f, 0xb2, 0x1b, 0x67, 0xd6, + 0x60, 0x85, 0xf2, 0x67, 0x49, 0x98, 0xe7, 0xae, 0xf5, 0xfd, 0xbe, 0xf3, 0x6c, 0xbb, 0x3d, 0x22, + 0x4f, 0xa1, 0x40, 0xbd, 0x6a, 0x6b, 0xd8, 0x1e, 0x8d, 0xe8, 0xf1, 0x95, 0xf0, 0xaa, 0x71, 0x7d, + 0xca, 0x55, 0x73, 0xfc, 0xca, 0x4e, 0x7b, 0x68, 0x6e, 0x33, 0x6c, 0xc3, 0x72, 0xc6, 0x27, 0x46, + 0xde, 0xf2, 0x6b, 0xc8, 0x16, 0xe4, 0x87, 0x93, 0x9e, 0x67, 0x4c, 0x46, 0x63, 0x95, 0x48, 0x63, + 0xdb, 0x93, 0x5e, 0xc0, 0x16, 0x0c, 0xbd, 0x0a, 0x3a, 0x30, 0xea, 0x8f, 0x3d, 0x5b, 0x89, 0x53, + 0x06, 0x46, 0x5d, 0x47, 0x70, 0x60, 0x07, 0x7e, 0x0d, 0x79, 0x0c, 0x40, 0x8f, 0x97, 0x63, 0xd3, + 0xd4, 0x09, 0x15, 0x94, 0xd7, 0xde, 0x8c, 0xb4, 0xb5, 0xe7, 0x8c, 0xf7, 0xed, 0x3d, 0x67, 0xcc, + 0x0c, 0xd1, 0x83, 0x89, 0xc5, 0xa5, 0x77, 0x40, 0x09, 0xcf, 0x5f, 0xbc, 0x91, 0xa7, 0x66, 0xdc, + 0xc8, 0x73, 0xfc, 0x46, 0x5e, 0x97, 0xef, 0x4a, 0x4b, 0xef, 0x41, 0x31, 0x34, 0x65, 0x91, 0x4e, + 0x18, 0xfd, 0xb6, 0x48, 0xcf, 0x6b, 0xaf, 0x0b, 0x9f, 0xb3, 0xc5, 0x0d, 0x17, 0xed, 0xbe, 0x03, + 0x4a, 0x78, 0xfa, 0xa2, 0xe1, 0x6c, 0x4c, 0xa6, 0x80, 0xfc, 0xfb, 0x30, 0x17, 0x98, 0xb2, 0x48, + 0xce, 0x9d, 0x32, 0xa9, 0xf2, 0x2f, 0xa5, 0x20, 0xd5, 0xb4, 0x4c, 0xbb, 0x4b, 0x5e, 0x0f, 0xc6, + 0xc9, 0x27, 0xe7, 0xdc, 0x18, 0x79, 0x31, 0x14, 0x23, 0x9f, 0x9c, 0xf3, 0x22, 0xe4, 0xc5, 0x50, + 0x84, 0x74, 0x9b, 0x6a, 0x3a, 0xb9, 0x3c, 0x15, 0x1f, 0x9f, 0x9c, 0x13, 0x82, 0xe3, 0xe5, 0xa9, + 0xe0, 0xe8, 0x37, 0xd7, 0x74, 0xea, 0x50, 0x83, 0x91, 0xf1, 0xc9, 0x39, 0x3f, 0x2a, 0x2e, 0x87, + 0xa3, 0xa2, 0xd7, 0x58, 0xd3, 0xd9, 0x90, 0x84, 0x88, 0x88, 0x43, 0x62, 0xb1, 0x70, 0x39, 0x1c, + 0x0b, 0x91, 0xc7, 0xa3, 0xe0, 0x72, 0x38, 0x0a, 0x62, 0x23, 0x8f, 0x7a, 0x17, 0x43, 0x51, 0x0f, + 0x8d, 0xb2, 0x70, 0xb7, 0x1c, 0x0e, 0x77, 0x8c, 0x27, 0x8c, 0x54, 0x8c, 0x75, 0x5e, 0x63, 0x4d, + 0x27, 0x5a, 0x28, 0xd0, 0x45, 0xdf, 0xf6, 0x71, 0x2f, 0xd0, 0xe9, 0xeb, 0x74, 0xd9, 0xdc, 0x8b, + 0x68, 0x31, 0xe6, 0x8b, 0x3f, 0xae, 0xa6, 0x7b, 0x11, 0xd3, 0x20, 0xd3, 0xe5, 0x09, 0xb0, 0x82, + 0x9e, 0x4b, 0x90, 0x25, 0x6e, 0xfe, 0xca, 0x46, 0x0b, 0x3d, 0x18, 0xce, 0x8b, 0xdd, 0xe9, 0x2b, + 0x30, 0xb7, 0xd1, 0x7a, 0xda, 0x1e, 0xf7, 0xcc, 0x89, 0xd3, 0xda, 0x6f, 0xf7, 0xbc, 0x47, 0x04, + 0xba, 0xff, 0xf9, 0x2e, 0x6f, 0xd9, 0x6f, 0xf7, 0xc8, 0x05, 0x57, 0x5c, 0x1d, 0x6c, 0x95, 0xb8, + 0xbc, 0x96, 0x5e, 0xa7, 0x8b, 0xc6, 0x8c, 0xa1, 0x2f, 0x5c, 0xe0, 0xbe, 0xf0, 0x61, 0x06, 0x52, + 0x47, 0x56, 0xdf, 0xb6, 0x1e, 0xe6, 0x20, 0xe3, 0xd8, 0xe3, 0x61, 0xdb, 0xb1, 0xcb, 0x3f, 0x92, + 0x00, 0x1e, 0xd9, 0xc3, 0xe1, 0x91, 0xd5, 0x7f, 0x71, 0x64, 0x92, 0x2b, 0x90, 0x1f, 0xb6, 0x9f, + 0x9b, 0xad, 0xa1, 0xd9, 0x3a, 0x1c, 0xbb, 0xe7, 0x20, 0x47, 0xab, 0xb6, 0xcd, 0x47, 0xe3, 0x13, + 0x52, 0x72, 0xaf, 0xe8, 0xa8, 0x1d, 0x94, 0x24, 0xbf, 0xb2, 0x2f, 0xf2, 0x4b, 0x67, 0x9a, 0xef, + 0xa1, 0x7b, 0xed, 0x64, 0x79, 0x44, 0x86, 0xef, 0x1e, 0x96, 0xa8, 0xe4, 0x1d, 0x73, 0x38, 0x6a, + 0x1d, 0xa2, 0x54, 0xa8, 0x1c, 0x52, 0xb4, 0xfc, 0x88, 0xdc, 0x86, 0xc4, 0xa1, 0x3d, 0x40, 0x91, + 0x9c, 0xb2, 0x2f, 0x14, 0x47, 0xde, 0x80, 0xc4, 0x70, 0xc2, 0x64, 0x93, 0xd7, 0x16, 0x84, 0x7b, + 0x02, 0x0b, 0x4d, 0x14, 0x36, 0x9c, 0xf4, 0xbc, 0x79, 0xdf, 0x28, 0x42, 0x62, 0xa3, 0xd9, 0xa4, + 0xb1, 0x7f, 0xa3, 0xd9, 0x5c, 0x53, 0xa4, 0xfa, 0x97, 0x20, 0xdb, 0x1b, 0x9b, 0x26, 0x75, 0x0f, + 0xb3, 0x73, 0x8e, 0x0f, 
0x31, 0xd6, 0x79, 0xa0, 0xfa, 0x36, 0x64, 0x0e, 0x59, 0xd6, 0x41, 0x22, + 0xd2, 0xda, 0xd2, 0x1f, 0xb2, 0x47, 0x95, 0x25, 0xbf, 0x39, 0x9c, 0xa7, 0x18, 0xae, 0x8d, 0xfa, + 0x2e, 0xe4, 0xc6, 0xad, 0xd3, 0x0c, 0x7e, 0xcc, 0xa2, 0x4b, 0x9c, 0xc1, 0xec, 0x98, 0x57, 0xd5, + 0x1b, 0xb0, 0x60, 0xd9, 0xee, 0x37, 0x94, 0x56, 0x87, 0x9d, 0xb1, 0x8b, 0xd3, 0x57, 0x39, 0xd7, + 0xb8, 0xc9, 0xbe, 0x5b, 0x5a, 0x36, 0x6f, 0x60, 0xa7, 0xb2, 0xfe, 0x08, 0x14, 0xc1, 0x0c, 0xa6, + 0x9e, 0x71, 0x56, 0xba, 0xec, 0x43, 0xa9, 0x67, 0x05, 0xcf, 0x7d, 0xc8, 0x08, 0x3b, 0x99, 0x31, + 0x46, 0x7a, 0xec, 0xab, 0xb3, 0x67, 0x04, 0x5d, 0xdd, 0xb4, 0x11, 0xea, 0x6b, 0xa2, 0x8d, 0x3c, + 0x63, 0x1f, 0xa4, 0x45, 0x23, 0x35, 0x3d, 0xb4, 0x2a, 0x47, 0xa7, 0x0e, 0xa5, 0xcf, 0xbe, 0x27, + 0x7b, 0x56, 0x98, 0x03, 0x9c, 0x61, 0x26, 0x7e, 0x30, 0x1f, 0xb2, 0x4f, 0xcd, 0x01, 0x33, 0x53, + 0xa3, 0x99, 0x9c, 0x3a, 0x9a, 0xe7, 0xec, 0xbb, 0xae, 0x67, 0x66, 0x6f, 0xd6, 0x68, 0x26, 0xa7, + 0x8e, 0x66, 0xc0, 0xbe, 0xf8, 0x06, 0xcc, 0xd4, 0xf4, 0xfa, 0x26, 0x10, 0x71, 0xab, 0x79, 0x9c, + 0x88, 0xb1, 0x33, 0x64, 0xdf, 0xf1, 0xfd, 0xcd, 0x66, 0x94, 0x59, 0x86, 0xe2, 0x07, 0x64, 0xb1, + 0x4f, 0xfc, 0x41, 0x43, 0x35, 0xbd, 0xbe, 0x05, 0xe7, 0xc5, 0x89, 0x9d, 0x61, 0x48, 0xb6, 0x2a, + 0x55, 0x8a, 0xc6, 0x82, 0x3f, 0x35, 0xce, 0x99, 0x69, 0x2a, 0x7e, 0x50, 0x23, 0x55, 0xaa, 0x28, + 0x53, 0xa6, 0x6a, 0x7a, 0xfd, 0x01, 0x14, 0x05, 0x53, 0x07, 0x18, 0xa1, 0xa3, 0xcd, 0xbc, 0x60, + 0xff, 0x6b, 0xe1, 0x99, 0xa1, 0x11, 0x3d, 0xbc, 0x63, 0x3c, 0xc6, 0x45, 0x1b, 0x19, 0xb3, 0x7f, + 0x14, 0xf0, 0xc7, 0x82, 0x8c, 0xd0, 0x91, 0xc0, 0xfc, 0x3b, 0xce, 0xca, 0x84, 0xfd, 0x0b, 0x81, + 0x3f, 0x14, 0x4a, 0xa8, 0xf7, 0x03, 0xd3, 0x31, 0x69, 0x90, 0x8b, 0xb1, 0xe1, 0xa0, 0x47, 0x7e, + 0x33, 0x12, 0xb0, 0x22, 0x3e, 0x90, 0x08, 0xd3, 0xa6, 0xc5, 0xfa, 0x16, 0xcc, 0x9f, 0xdd, 0x21, + 0x7d, 0x2c, 0xb1, 0x6c, 0xb9, 0xba, 0x42, 0x13, 0x6a, 0x63, 0xae, 0x13, 0xf0, 0x4b, 0x0d, 0x98, + 0x3b, 0xb3, 0x53, 0xfa, 0x44, 0x62, 0x39, 0x27, 0xb5, 0x64, 0x14, 0x3a, 0x41, 0xcf, 0x34, 0x77, + 0x66, 0xb7, 0xf4, 0xa9, 0xc4, 0x1e, 0x28, 0x74, 0xcd, 0x33, 0xe2, 0x7a, 0xa6, 0xb9, 0x33, 0xbb, + 0xa5, 0xaf, 0xb2, 0x8c, 0x52, 0xd6, 0xab, 0xa2, 0x11, 0xf4, 0x05, 0xf3, 0x67, 0x77, 0x4b, 0x5f, + 0x93, 0xf0, 0xb1, 0x42, 0xd6, 0x75, 0x6f, 0x5d, 0x3c, 0xcf, 0x34, 0x7f, 0x76, 0xb7, 0xf4, 0x75, + 0x09, 0x9f, 0x34, 0x64, 0x7d, 0x3d, 0x60, 0x26, 0x38, 0x9a, 0xd3, 0xdd, 0xd2, 0x37, 0x24, 0x7c, + 0x65, 0x90, 0xf5, 0x9a, 0x67, 0x66, 0x6f, 0x6a, 0x34, 0xa7, 0xbb, 0xa5, 0x6f, 0xe2, 0x2d, 0xbe, + 0x2e, 0xeb, 0x77, 0x02, 0x66, 0xd0, 0x33, 0x15, 0x5f, 0xc1, 0x2d, 0x7d, 0x4b, 0xc2, 0xc7, 0x20, + 0x59, 0xbf, 0x6b, 0xb8, 0xbd, 0xfb, 0x9e, 0xa9, 0xf8, 0x0a, 0x6e, 0xe9, 0x33, 0x09, 0xdf, 0x8c, + 0x64, 0xfd, 0x5e, 0xd0, 0x10, 0x7a, 0x26, 0xe5, 0x55, 0xdc, 0xd2, 0xb7, 0xa9, 0xa5, 0x62, 0x5d, + 0x5e, 0x5f, 0x35, 0xdc, 0x01, 0x08, 0x9e, 0x49, 0x79, 0x15, 0xb7, 0xf4, 0x1d, 0x6a, 0x4a, 0xa9, + 0xcb, 0xeb, 0x6b, 0x21, 0x53, 0x35, 0xbd, 0xfe, 0x08, 0x0a, 0x67, 0x75, 0x4b, 0xdf, 0x15, 0xdf, + 0xe2, 0xf2, 0x1d, 0xc1, 0x37, 0xed, 0x0a, 0x7b, 0x76, 0xaa, 0x63, 0xfa, 0x1e, 0xe6, 0x38, 0xf5, + 0xb9, 0x27, 0xec, 0xbd, 0x8a, 0x11, 0xfc, 0xed, 0x63, 0x6e, 0x6a, 0xdb, 0x3f, 0x1f, 0xa7, 0xfa, + 0xa8, 0xef, 0x4b, 0xf8, 0xa8, 0x55, 0xe0, 0x06, 0x11, 0xef, 0x9d, 0x14, 0xe6, 0xb0, 0x3e, 0xf4, + 0x67, 0x79, 0x9a, 0xb7, 0xfa, 0x81, 0xf4, 0x2a, 0xee, 0xaa, 0x9e, 0x68, 0xee, 0x34, 0xbc, 0xc5, + 0xc0, 0x9a, 0xb7, 0x21, 0x79, 0xac, 0xad, 0xae, 0x89, 0x57, 0x32, 0xf1, 0x2d, 0x97, 0x39, 0xa9, + 0xbc, 0x56, 0x14, 0x9e, 0xbb, 0x87, 0x23, 0xe7, 
0xc4, 0x40, 0x16, 0x67, 0x6b, 0x91, 0xec, 0x4f, + 0x62, 0xd8, 0x1a, 0x67, 0x57, 0x23, 0xd9, 0x9f, 0xc6, 0xb0, 0xab, 0x9c, 0xad, 0x47, 0xb2, 0xbf, + 0x1a, 0xc3, 0xd6, 0x39, 0x7b, 0x3d, 0x92, 0xfd, 0xb5, 0x18, 0xf6, 0x3a, 0x67, 0xd7, 0x22, 0xd9, + 0x5f, 0x8f, 0x61, 0xd7, 0x38, 0xfb, 0x4e, 0x24, 0xfb, 0x1b, 0x31, 0xec, 0x3b, 0x9c, 0x7d, 0x37, + 0x92, 0xfd, 0xcd, 0x18, 0xf6, 0x5d, 0xce, 0xbe, 0x17, 0xc9, 0xfe, 0x56, 0x0c, 0xfb, 0x1e, 0x63, + 0xaf, 0xad, 0x46, 0xb2, 0x3f, 0x8b, 0x66, 0xaf, 0xad, 0x72, 0x76, 0xb4, 0xd6, 0xbe, 0x1d, 0xc3, + 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0x77, 0x62, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x6e, + 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 0x5a, 0x6b, 0xdf, 0x8b, 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, + 0xfb, 0x31, 0x6c, 0xae, 0xb5, 0xb5, 0x68, 0xad, 0xfd, 0x20, 0x86, 0xcd, 0xb5, 0xb6, 0x16, 0xad, + 0xb5, 0x3f, 0x8a, 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x8f, 0x63, 0xd8, 0x5c, 0x6b, 0x6b, + 0xd1, 0x5a, 0xfb, 0x93, 0x18, 0x36, 0xd7, 0x9a, 0x16, 0xad, 0xb5, 0x3f, 0x8d, 0x66, 0x6b, 0x5c, + 0x6b, 0x5a, 0xb4, 0xd6, 0xfe, 0x2c, 0x86, 0xcd, 0xb5, 0xa6, 0x45, 0x6b, 0xed, 0xcf, 0x63, 0xd8, + 0x5c, 0x6b, 0x5a, 0xb4, 0xd6, 0x7e, 0x18, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0x17, 0x31, + 0x6c, 0xae, 0x35, 0x2d, 0x5a, 0x6b, 0x7f, 0x19, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0x57, + 0x31, 0x6c, 0xae, 0x35, 0x2d, 0x5a, 0x6b, 0x7f, 0x1d, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, + 0x37, 0x31, 0x6c, 0xae, 0x35, 0x2d, 0x5a, 0x6b, 0x7f, 0x1b, 0xc3, 0xe6, 0x5a, 0xab, 0x46, 0x6b, + 0xed, 0xef, 0xa2, 0xd9, 0x55, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x3e, 0x86, 0xcd, 0xb5, 0x56, + 0x8d, 0xd6, 0xda, 0x3f, 0xc4, 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0xc7, 0x18, 0x36, 0xd7, + 0x5a, 0x35, 0x5a, 0x6b, 0x3f, 0x8a, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0x4f, 0x31, 0x6c, + 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x39, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc4, + 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0xd7, 0x18, 0x36, 0xd7, 0x5a, 0x35, 0x5a, 0x6b, 0xff, + 0x16, 0xc3, 0xe6, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0xef, 0xd1, 0x6c, 0x9d, 0x6b, 0x4d, 0x8f, 0xd6, + 0xda, 0x7f, 0xc4, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x67, 0x0c, 0x9b, 0x6b, 0x4d, 0x8f, + 0xd6, 0xda, 0x7f, 0xc5, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x77, 0x0c, 0x9b, 0x6b, 0x4d, + 0x8f, 0xd6, 0xda, 0xff, 0xc4, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x38, 0x86, 0xcd, 0xb5, + 0xa6, 0x47, 0x6b, 0xed, 0x27, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0xff, 0x1b, 0xc3, 0xe6, + 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x7f, 0x31, 0x6c, 0xae, 0xb5, 0xf5, 0x68, 0xad, 0xfd, 0x7f, 0x34, + 0x7b, 0x7d, 0xf5, 0xa7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x40, 0x32, 0xb7, 0xac, 0x57, 0x39, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.proto b/vendor/github.com/golang/protobuf/proto/testdata/test.proto new file mode 100644 index 00000000000..70e3cfcda23 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/test.proto @@ -0,0 +1,548 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A feature-rich test file for the protocol compiler and libraries. + +syntax = "proto2"; + +package testdata; + +enum FOO { FOO1 = 1; }; + +message GoEnum { + required FOO foo = 1; +} + +message GoTestField { + required string Label = 1; + required string Type = 2; +} + +message GoTest { + // An enum, for completeness. + enum KIND { + VOID = 0; + + // Basic types + BOOL = 1; + BYTES = 2; + FINGERPRINT = 3; + FLOAT = 4; + INT = 5; + STRING = 6; + TIME = 7; + + // Groupings + TUPLE = 8; + ARRAY = 9; + MAP = 10; + + // Table types + TABLE = 11; + + // Functions + FUNCTION = 12; // last tag + }; + + // Some typical parameters + required KIND Kind = 1; + optional string Table = 2; + optional int32 Param = 3; + + // Required, repeated and optional foreign fields. 
+ required GoTestField RequiredField = 4; + repeated GoTestField RepeatedField = 5; + optional GoTestField OptionalField = 6; + + // Required fields of all basic types + required bool F_Bool_required = 10; + required int32 F_Int32_required = 11; + required int64 F_Int64_required = 12; + required fixed32 F_Fixed32_required = 13; + required fixed64 F_Fixed64_required = 14; + required uint32 F_Uint32_required = 15; + required uint64 F_Uint64_required = 16; + required float F_Float_required = 17; + required double F_Double_required = 18; + required string F_String_required = 19; + required bytes F_Bytes_required = 101; + required sint32 F_Sint32_required = 102; + required sint64 F_Sint64_required = 103; + + // Repeated fields of all basic types + repeated bool F_Bool_repeated = 20; + repeated int32 F_Int32_repeated = 21; + repeated int64 F_Int64_repeated = 22; + repeated fixed32 F_Fixed32_repeated = 23; + repeated fixed64 F_Fixed64_repeated = 24; + repeated uint32 F_Uint32_repeated = 25; + repeated uint64 F_Uint64_repeated = 26; + repeated float F_Float_repeated = 27; + repeated double F_Double_repeated = 28; + repeated string F_String_repeated = 29; + repeated bytes F_Bytes_repeated = 201; + repeated sint32 F_Sint32_repeated = 202; + repeated sint64 F_Sint64_repeated = 203; + + // Optional fields of all basic types + optional bool F_Bool_optional = 30; + optional int32 F_Int32_optional = 31; + optional int64 F_Int64_optional = 32; + optional fixed32 F_Fixed32_optional = 33; + optional fixed64 F_Fixed64_optional = 34; + optional uint32 F_Uint32_optional = 35; + optional uint64 F_Uint64_optional = 36; + optional float F_Float_optional = 37; + optional double F_Double_optional = 38; + optional string F_String_optional = 39; + optional bytes F_Bytes_optional = 301; + optional sint32 F_Sint32_optional = 302; + optional sint64 F_Sint64_optional = 303; + + // Default-valued fields of all basic types + optional bool F_Bool_defaulted = 40 [default=true]; + optional int32 F_Int32_defaulted = 41 [default=32]; + optional int64 F_Int64_defaulted = 42 [default=64]; + optional fixed32 F_Fixed32_defaulted = 43 [default=320]; + optional fixed64 F_Fixed64_defaulted = 44 [default=640]; + optional uint32 F_Uint32_defaulted = 45 [default=3200]; + optional uint64 F_Uint64_defaulted = 46 [default=6400]; + optional float F_Float_defaulted = 47 [default=314159.]; + optional double F_Double_defaulted = 48 [default=271828.]; + optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; + optional sint32 F_Sint32_defaulted = 402 [default = -32]; + optional sint64 F_Sint64_defaulted = 403 [default = -64]; + + // Packed repeated fields (no string or bytes). + repeated bool F_Bool_repeated_packed = 50 [packed=true]; + repeated int32 F_Int32_repeated_packed = 51 [packed=true]; + repeated int64 F_Int64_repeated_packed = 52 [packed=true]; + repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; + repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; + repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; + repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; + repeated float F_Float_repeated_packed = 57 [packed=true]; + repeated double F_Double_repeated_packed = 58 [packed=true]; + repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; + repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; + + // Required, repeated, and optional groups. 
+ required group RequiredGroup = 70 { + required string RequiredField = 71; + }; + + repeated group RepeatedGroup = 80 { + required string RequiredField = 81; + }; + + optional group OptionalGroup = 90 { + required string RequiredField = 91; + }; +} + +// For testing a group containing a required field. +message GoTestRequiredGroupField { + required group Group = 1 { + required int32 Field = 2; + }; +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +message GoSkipTest { + required int32 skip_int32 = 11; + required fixed32 skip_fixed32 = 12; + required fixed64 skip_fixed64 = 13; + required string skip_string = 14; + required group SkipGroup = 15 { + required int32 group_int32 = 16; + required string group_string = 17; + } +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +message NonPackedTest { + repeated int32 a = 1; +} + +message PackedTest { + repeated int32 b = 1 [packed=true]; +} + +message MaxTag { + // Maximum possible tag number. + optional string last_field = 536870911; +} + +message OldMessage { + message Nested { + optional string name = 1; + } + optional Nested nested = 1; + + optional int32 num = 2; +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +message NewMessage { + message Nested { + optional string name = 1; + optional string food_group = 2; + } + optional Nested nested = 1; + + // This is an int32 in OldMessage. + optional int64 num = 2; +} + +// Smaller tests for ASCII formatting. + +message InnerMessage { + required string host = 1; + optional int32 port = 2 [default=4000]; + optional bool connected = 3; +} + +message OtherMessage { + optional int64 key = 1; + optional bytes value = 2; + optional float weight = 3; + optional InnerMessage inner = 4; + + extensions 100 to max; +} + +message RequiredInnerMessage { + required InnerMessage leo_finally_won_an_oscar = 1; +} + +message MyMessage { + required int32 count = 1; + optional string name = 2; + optional string quote = 3; + repeated string pet = 4; + optional InnerMessage inner = 5; + repeated OtherMessage others = 6; + optional RequiredInnerMessage we_must_go_deeper = 13; + repeated InnerMessage rep_inner = 12; + + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color bikeshed = 7; + + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // This field becomes [][]byte in the generated code. 
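The NonPackedTest/PackedTest pair above encodes the claim that the decoder accepts either wire form of a repeated scalar, whatever the declared packed option. A minimal sketch of that round trip, assuming the generated testdata package is importable as below (A and B are the usual protoc-gen-go names for fields a and b):

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata" // assumed generated package
)

func main() {
	// Encode with the non-packed message...
	np := &pb.NonPackedTest{A: []int32{1, 2, 3}}
	raw, err := proto.Marshal(np)
	if err != nil {
		log.Fatal(err)
	}

	// ...and decode the same bytes as the packed message.
	p := new(pb.PackedTest)
	if err := proto.Unmarshal(raw, p); err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.B) // expected: [1 2 3]
}
```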
+ repeated bytes rep_bytes = 10; + + optional double bigfloat = 11; + + extensions 100 to max; +} + +message Ext { + extend MyMessage { + optional Ext more = 103; + optional string text = 104; + optional int32 number = 105; + } + + optional string data = 1; +} + +extend MyMessage { + repeated string greeting = 106; +} + +message ComplexExtension { + optional int32 first = 1; + optional int32 second = 2; + repeated int32 third = 3; +} + +extend OtherMessage { + optional ComplexExtension complex = 200; + repeated ComplexExtension r_complex = 201; +} + +message DefaultsMessage { + enum DefaultsEnum { + ZERO = 0; + ONE = 1; + TWO = 2; + }; + extensions 100 to max; +} + +extend DefaultsMessage { + optional double no_default_double = 101; + optional float no_default_float = 102; + optional int32 no_default_int32 = 103; + optional int64 no_default_int64 = 104; + optional uint32 no_default_uint32 = 105; + optional uint64 no_default_uint64 = 106; + optional sint32 no_default_sint32 = 107; + optional sint64 no_default_sint64 = 108; + optional fixed32 no_default_fixed32 = 109; + optional fixed64 no_default_fixed64 = 110; + optional sfixed32 no_default_sfixed32 = 111; + optional sfixed64 no_default_sfixed64 = 112; + optional bool no_default_bool = 113; + optional string no_default_string = 114; + optional bytes no_default_bytes = 115; + optional DefaultsMessage.DefaultsEnum no_default_enum = 116; + + optional double default_double = 201 [default = 3.1415]; + optional float default_float = 202 [default = 3.14]; + optional int32 default_int32 = 203 [default = 42]; + optional int64 default_int64 = 204 [default = 43]; + optional uint32 default_uint32 = 205 [default = 44]; + optional uint64 default_uint64 = 206 [default = 45]; + optional sint32 default_sint32 = 207 [default = 46]; + optional sint64 default_sint64 = 208 [default = 47]; + optional fixed32 default_fixed32 = 209 [default = 48]; + optional fixed64 default_fixed64 = 210 [default = 49]; + optional sfixed32 default_sfixed32 = 211 [default = 50]; + optional sfixed64 default_sfixed64 = 212 [default = 51]; + optional bool default_bool = 213 [default = true]; + optional string default_string = 214 [default = "Hello, string"]; + optional bytes default_bytes = 215 [default = "Hello, bytes"]; + optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; +} + +message MyMessageSet { + option message_set_wire_format = true; + extensions 100 to max; +} + +message Empty { +} + +extend MyMessageSet { + optional Empty x201 = 201; + optional Empty x202 = 202; + optional Empty x203 = 203; + optional Empty x204 = 204; + optional Empty x205 = 205; + optional Empty x206 = 206; + optional Empty x207 = 207; + optional Empty x208 = 208; + optional Empty x209 = 209; + optional Empty x210 = 210; + optional Empty x211 = 211; + optional Empty x212 = 212; + optional Empty x213 = 213; + optional Empty x214 = 214; + optional Empty x215 = 215; + optional Empty x216 = 216; + optional Empty x217 = 217; + optional Empty x218 = 218; + optional Empty x219 = 219; + optional Empty x220 = 220; + optional Empty x221 = 221; + optional Empty x222 = 222; + optional Empty x223 = 223; + optional Empty x224 = 224; + optional Empty x225 = 225; + optional Empty x226 = 226; + optional Empty x227 = 227; + optional Empty x228 = 228; + optional Empty x229 = 229; + optional Empty x230 = 230; + optional Empty x231 = 231; + optional Empty x232 = 232; + optional Empty x233 = 233; + optional Empty x234 = 234; + optional Empty x235 = 235; + optional Empty x236 = 236; + optional Empty x237 
= 237; + optional Empty x238 = 238; + optional Empty x239 = 239; + optional Empty x240 = 240; + optional Empty x241 = 241; + optional Empty x242 = 242; + optional Empty x243 = 243; + optional Empty x244 = 244; + optional Empty x245 = 245; + optional Empty x246 = 246; + optional Empty x247 = 247; + optional Empty x248 = 248; + optional Empty x249 = 249; + optional Empty x250 = 250; +} + +message MessageList { + repeated group Message = 1 { + required string name = 2; + required int32 count = 3; + } +} + +message Strings { + optional string string_field = 1; + optional bytes bytes_field = 2; +} + +message Defaults { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + optional bool F_Bool = 1 [default=true]; + optional int32 F_Int32 = 2 [default=32]; + optional int64 F_Int64 = 3 [default=64]; + optional fixed32 F_Fixed32 = 4 [default=320]; + optional fixed64 F_Fixed64 = 5 [default=640]; + optional uint32 F_Uint32 = 6 [default=3200]; + optional uint64 F_Uint64 = 7 [default=6400]; + optional float F_Float = 8 [default=314159.]; + optional double F_Double = 9 [default=271828.]; + optional string F_String = 10 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes = 11 [default="Bignose"]; + optional sint32 F_Sint32 = 12 [default=-32]; + optional sint64 F_Sint64 = 13 [default=-64]; + optional Color F_Enum = 14 [default=GREEN]; + + // More fields with crazy defaults. + optional float F_Pinf = 15 [default=inf]; + optional float F_Ninf = 16 [default=-inf]; + optional float F_Nan = 17 [default=nan]; + + // Sub-message. + optional SubDefaults sub = 18; + + // Redundant but explicit defaults. + optional string str_zero = 19 [default=""]; +} + +message SubDefaults { + optional int64 n = 1 [default=7]; +} + +message RepeatedEnum { + enum Color { + RED = 1; + } + repeated Color color = 1; +} + +message MoreRepeated { + repeated bool bools = 1; + repeated bool bools_packed = 2 [packed=true]; + repeated int32 ints = 3; + repeated int32 ints_packed = 4 [packed=true]; + repeated int64 int64s_packed = 7 [packed=true]; + repeated string strings = 5; + repeated fixed32 fixeds = 6; +} + +// GroupOld and GroupNew have the same wire format. +// GroupNew has a new field inside a group. + +message GroupOld { + optional group G = 101 { + optional int32 x = 2; + } +} + +message GroupNew { + optional group G = 101 { + optional int32 x = 2; + optional int32 y = 3; + } +} + +message FloatingPoint { + required double f = 1; + optional bool exact = 2; +} + +message MessageWithMap { + map<int32, string> name_mapping = 1; + map<int64, FloatingPoint> msg_mapping = 2; + map<bool, bytes> byte_mapping = 3; + map<string, string> str_to_str = 4; +} + +message Oneof { + oneof union { + bool F_Bool = 1; + int32 F_Int32 = 2; + int64 F_Int64 = 3; + fixed32 F_Fixed32 = 4; + fixed64 F_Fixed64 = 5; + uint32 F_Uint32 = 6; + uint64 F_Uint64 = 7; + float F_Float = 8; + double F_Double = 9; + string F_String = 10; + bytes F_Bytes = 11; + sint32 F_Sint32 = 12; + sint64 F_Sint64 = 13; + MyMessage.Color F_Enum = 14; + GoTestField F_Message = 15; + group F_Group = 16 { + optional int32 x = 17; + } + int32 F_Largest_Tag = 536870911; + } + + oneof tormato { + int32 value = 100; + } +} + +message Communique { + optional bool make_me_cry = 1; + + // This is a oneof, called "union".
+ oneof union { + int32 number = 5; + string name = 6; + bytes data = 7; + double temp_c = 8; + MyMessage.Color col = 9; + Strings msg = 10; + } +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 81e1bdf386a..965876bf033 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -154,7 +154,7 @@ func (w *textWriter) indent() { w.ind++ } func (w *textWriter) unindent() { if w.ind == 0 { - log.Printf("proto: textWriter unindented too far") + log.Print("proto: textWriter unindented too far") return } w.ind-- @@ -170,20 +170,98 @@ func writeName(w *textWriter, props *Properties) error { return nil } -var ( - messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() -) - // raw is the interface satisfied by RawMessage. type raw interface { Bytes() []byte } -func writeStruct(w *textWriter, sv reflect.Value) error { - if sv.Type() == messageSetType { - return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") } + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { @@ -235,7 +313,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } continue } - if err := writeAny(w, v, props); err != nil { + if err := tm.writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -277,7 +355,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -294,7 +372,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -366,7 +444,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } // Enums have a String method, so writeAny will work fine. - if err := writeAny(w, fv, props); err != nil { + if err := tm.writeAny(w, fv, props); err != nil { return err } @@ -377,8 +455,8 @@ func writeStruct(w *textWriter, sv reflect.Value) error { // Extensions (the XXX_extensions field). pv := sv.Addr() - if pv.Type().Implements(extendableProtoType) { - if err := writeExtensions(w, pv); err != nil { + if _, ok := extendable(pv.Interface()); ok { + if err := tm.writeExtensions(w, pv); err != nil { return err } } @@ -408,7 +486,7 @@ func writeRaw(w *textWriter, b []byte) error { } // writeAny writes an arbitrary field. -func writeAny(w *textWriter, v reflect.Value, props *Properties) error { +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) // Floats have special cases. @@ -435,7 +513,7 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error { switch v.Kind() { case reflect.Slice: // Should only be a []byte; repeated fields are handled in writeStruct. 
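Seen from the caller's side, the ExpandAny hook above means a google.protobuf.Any whose inner type is linked into the binary is printed with its fields inlined under the bracketed type URL. A rough sketch, assuming the generated Any type lives at the usual github.com/golang/protobuf/ptypes/any path and that the type.googleapis.com/ URL prefix is used (neither is part of this diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
	// Assumed location of the generated google.protobuf.Any type.
	"github.com/golang/protobuf/ptypes/any"
)

func main() {
	inner := &pb.MyMessage{Count: proto.Int32(42), Name: proto.String("Dave")}
	value, err := proto.Marshal(inner)
	if err != nil {
		log.Fatal(err)
	}
	msg := &any.Any{
		TypeUrl: "type.googleapis.com/testdata.MyMessage", // assumed URL prefix
		Value:   value,
	}

	// Default behaviour: opaque type_url and raw value bytes.
	fmt.Println(proto.MarshalTextString(msg))

	// With ExpandAny, the marshaler resolves "testdata.MyMessage" via the
	// type registry, unmarshals Value, and prints the nested fields under
	// [type.googleapis.com/testdata.MyMessage]: < ... >.
	expander := proto.TextMarshaler{ExpandAny: true}
	fmt.Println(expander.Text(msg))
}
```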
- if err := writeString(w, string(v.Interface().([]byte))); err != nil { + if err := writeString(w, string(v.Bytes())); err != nil { return err } case reflect.String: @@ -457,15 +535,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error { } } w.indent() - if tm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } if _, err = w.Write(text); err != nil { return err } - } else if err := writeStruct(w, v); err != nil { + } else if err := tm.writeStruct(w, v); err != nil { return err } w.unindent() @@ -525,44 +603,6 @@ func writeString(w *textWriter, s string) error { return w.WriteByte('"') } -func writeMessageSet(w *textWriter, ms *MessageSet) error { - for _, item := range ms.Item { - id := *item.TypeId - if msd, ok := messageSetMap[id]; ok { - // Known message set type. - if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { - return err - } - w.indent() - - pb := reflect.New(msd.t.Elem()) - if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { - if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { - return err - } - } else { - if err := writeStruct(w, pb.Elem()); err != nil { - return err - } - } - } else { - // Unknown type. - if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { - return err - } - w.indent() - if err := writeUnknownStruct(w, item.Message); err != nil { - return err - } - } - w.unindent() - if _, err := w.Write(gtNewline); err != nil { - return err - } - } - return nil -} - func writeUnknownStruct(w *textWriter, data []byte) (err error) { if !w.compact { if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { @@ -647,19 +687,24 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // writeExtensions writes all the extensions in pv. // pv is assumed to be a pointer to a protocol message struct that is extendable. -func writeExtensions(w *textWriter, pv reflect.Value) error { +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] - ep := pv.Interface().(extendableProto) + ep, _ := extendable(pv.Interface()) // Order the extensions by ID. // This isn't strictly necessary, but it will give us // canonical output, which will also make testing easier. - m := ep.ExtensionMap() + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() ids := make([]int32, 0, len(m)) for id := range m { ids = append(ids, id) } sort.Sort(int32Slice(ids)) + mu.Unlock() for _, extNum := range ids { ext := m[extNum] @@ -682,13 +727,13 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { // Repeated extensions will appear as a slice. 
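The rewritten writeExtensions goes through the new extendable/extensionsRead accessors, but the text a caller sees is unchanged: each extension prints under its bracketed full name, and a repeated extension prints once per element. A short sketch against the testdata types used throughout these tests:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	m := &pb.MyMessage{Count: proto.Int32(4)} // count is required
	if err := proto.SetExtension(m, pb.E_Ext_More, &pb.Ext{Data: proto.String("extra")}); err != nil {
		log.Fatal(err)
	}
	if err := proto.SetExtension(m, pb.E_Greeting, []string{"adg", "easy"}); err != nil {
		log.Fatal(err)
	}

	fmt.Print(proto.MarshalTextString(m))
	// count: 4
	// [testdata.Ext.more]: <
	//   data: "extra"
	// >
	// [testdata.greeting]: "adg"
	// [testdata.greeting]: "easy"
}
```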
if !desc.repeated() { - if err := writeExtension(w, desc.Name, pb); err != nil { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { return err } } else { v := reflect.ValueOf(pb) for i := 0; i < v.Len(); i++ { - if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { return err } } @@ -697,7 +742,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { return nil } -func writeExtension(w *textWriter, name string, pb interface{}) error { +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { return err } @@ -706,7 +751,7 @@ func writeExtension(w *textWriter, name string, pb interface{}) error { return err } } - if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -731,7 +776,15 @@ func (w *textWriter) writeIndent() { w.complete = false } -func marshalText(w io.Writer, pb Message, compact bool) error { +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("")) @@ -746,11 +799,11 @@ func marshalText(w io.Writer, pb Message, compact bool) error { aw := &textWriter{ w: ww, complete: true, - compact: compact, + compact: tm.Compact, } - if tm, ok := pb.(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } @@ -764,7 +817,7 @@ func marshalText(w io.Writer, pb Message, compact bool) error { } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) - if err := writeStruct(aw, v); err != nil { + if err := tm.writeStruct(aw, v); err != nil { return err } if bw != nil { @@ -773,25 +826,29 @@ func marshalText(w io.Writer, pb Message, compact bool) error { return nil } +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + // MarshalText writes a given protocol buffer in text format. // The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { - return marshalText(w, pb, false) -} +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } // MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, false) - return buf.String() -} +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } // CompactText writes a given protocol buffer in compact text format (one line). 
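With this change the package-level helpers become thin wrappers over a shared TextMarshaler, and callers that need a non-default configuration can build their own. A small sketch of the equivalences introduced here:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	m := &pb.MyMessage{Count: proto.Int32(7), Name: proto.String("Rob")}

	// proto.MarshalTextString is now defaultTextMarshaler.Text, and
	// proto.CompactTextString is compactTextMarshaler.Text.
	fmt.Println(proto.CompactTextString(m))

	// Equivalent, spelled out through the new exported type.
	tm := proto.TextMarshaler{Compact: true}
	fmt.Println(tm.Text(m))

	// Marshal still writes to an io.Writer, as MarshalText always has.
	var buf bytes.Buffer
	if err := tm.Marshal(&buf, m); err != nil {
		fmt.Println("marshal error:", err)
	}
	fmt.Println(buf.String())
}
```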
-func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } // CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, true) - return buf.String() -} +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 6d0cf258947..7e6f145a10d 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -44,6 +44,9 @@ import ( "unicode/utf8" ) +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + type ParseError struct { Message string Line int // 1-based line number @@ -119,6 +122,14 @@ func isWhitespace(c byte) bool { return false } +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + func (p *textParser) skipWhitespace() { i := 0 for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { @@ -155,7 +166,7 @@ func (p *textParser) advance() { p.cur.offset, p.cur.line = p.offset, p.line p.cur.unquoted = "" switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',': + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': // Single symbol p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] case '"', '\'': @@ -333,13 +344,13 @@ func (p *textParser) next() *token { p.advance() if p.done { p.cur.value = "" - } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { // Look for multiple quoted strings separated by whitespace, // and concatenate them. cat := p.cur for { p.skipWhitespace() - if p.done || p.s[0] != '"' { + if p.done || !isQuote(p.s[0]) { break } p.advance() @@ -443,7 +454,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. A name may also be - // "[extension]". + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > for { tok := p.next() if tok.err != nil { @@ -453,33 +467,74 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { break } if tok.value == "[" { - // Looks like an extension. + // Looks like an extension or an Any. // // TODO: Check whether we need to handle // namespace rooted names (e.g. ".something.Foo"). - tok = p.next() - if tok.err != nil { - return tok.err + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
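On the parsing side (continued below), a bracketed type URL followed by a nested struct is turned back into the type_url/value pair of an Any. A hedged sketch of that direction, under the same assumptions as the marshaling example above (ptypes/any import path, type.googleapis.com/ prefix, and the inner type being registered in the binary):

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	// Assumed location of the generated google.protobuf.Any type.
	"github.com/golang/protobuf/ptypes/any"

	// Imported for side effect only: registers testdata.MyMessage so the
	// parser can resolve the type URL.
	_ "github.com/golang/protobuf/proto/testdata"
)

func main() {
	in := `[type.googleapis.com/testdata.MyMessage]: <
  count: 47
  name: "Dave"
>`
	m := new(any.Any)
	if err := proto.UnmarshalText(in, m); err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.TypeUrl) // type.googleapis.com/testdata.MyMessage
	// m.Value now holds the binary encoding of the nested MyMessage.
}
```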
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue } + var desc *ExtensionDesc // This could be faster, but it's functional. // TODO: Do something smarter than a linear scan. for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == tok.value { + if d.Name == extName { desc = d break } } if desc == nil { - return p.errorf("unrecognized extension %q", tok.value) - } - // Check the extension terminator. - tok = p.next() - if tok.err != nil { - return tok.err - } - if tok.value != "]" { - return p.errorf("unrecognized extension terminator %q", tok.value) + return p.errorf("unrecognized extension %q", extName) } props := &Properties{} @@ -506,7 +561,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { } reqFieldErr = err } - ep := sv.Addr().Interface().(extendableProto) + ep := sv.Addr().Interface().(Message) if !rep { SetExtension(ep, desc, ext.Interface()) } else { @@ -558,8 +613,9 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { // The map entry should be this sequence of tokens: // < key : KEY value : VALUE > - // Technically the "key" and "value" could come in any order, - // but in practice they won't. + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. 
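The loop that replaces the fixed key-then-value sequence is shown next; in effect it makes inputs like the ones below legal. A small sketch, using the MessageWithMap message from the test .proto earlier in this patch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	m := new(pb.MessageWithMap)
	// Value before key, an omitted value, and an entirely empty entry are
	// all accepted by the tolerant map-entry loop.
	in := `msg_mapping:<value:<f: 2.0> key: 7>` +
		` name_mapping:<key: 1>` +
		` byte_mapping:<>`
	if err := proto.UnmarshalText(in, m); err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.MsgMapping[7].GetF()) // 2
	fmt.Println(m.NameMapping[1])       // "" (value omitted)
	fmt.Println(m.ByteMapping[false])   // nil/empty (key and value omitted)
}
```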
tok := p.next() var terminator string @@ -571,32 +627,39 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { default: return p.errorf("expected '{' or '<', found %q", tok.value) } - if err := p.consumeToken("key"); err != nil { - return err - } - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken("value"); err != nil { - return err - } - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken(terminator); err != nil { - return err + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } } dst.SetMapIndex(key, val) @@ -619,7 +682,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { return err } reqFieldErr = err - } else if props.Required { + } + if props.Required { reqCount-- } @@ -635,6 +699,35 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { return reqFieldErr } +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + // consumeOptionalSeparator consumes an optional semicolon or comma. // It is used in readStruct to provide backward compatibility. func (p *textParser) consumeOptionalSeparator() error { diff --git a/vendor/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/github.com/golang/protobuf/proto/text_parser_test.go new file mode 100644 index 00000000000..f25d5b0b4c2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser_test.go @@ -0,0 +1,573 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "math" + "reflect" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + . "github.com/golang/protobuf/proto/testdata" +) + +type UnmarshalTextTest struct { + in string + err string // if "", no error expected + out *MyMessage +} + +func buildExtStructTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_More, &Ext{ + Data: String("Hello, world!"), + }) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtDataTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_Text, String("Hello, world!")) + SetExtension(msg, E_Ext_Number, Int32(1729)) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtRepStringTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { + panic(err) + } + return UnmarshalTextTest{in: text, out: msg} +} + +var unMarshalTextTests = []UnmarshalTextTest{ + // Basic + { + in: " count:42\n name:\"Dave\" ", + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + }, + }, + + // Empty quoted string + { + in: `count:42 name:""`, + out: &MyMessage{ + Count: Int32(42), + Name: String(""), + }, + }, + + // Quoted string concatenation with double quotes + { + in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string concatenation with single quotes + { + in: "count:42 name: 'My name is '\n'elsewhere'", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string concatenations with mixed quotes + { + in: "count:42 name: 'My name is '\n\"elsewhere\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + { + in: "count:42 
name: \"My name is \"\n'elsewhere'", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string with escaped apostrophe + { + in: `count:42 name: "HOLIDAY - New Year\'s Day"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("HOLIDAY - New Year's Day"), + }, + }, + + // Quoted string with single quote + { + in: `count:42 name: 'Roger "The Ramster" Ramjet'`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`Roger "The Ramster" Ramjet`), + }, + }, + + // Quoted string with all the accepted special characters from the C++ test + { + in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), + }, + }, + + // Quoted string with quoted backslash + { + in: `count:42 name: "\\'xyz"`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`\'xyz`), + }, + }, + + // Quoted string with UTF-8 bytes. + { + in: "count:42 name: '\303\277\302\201\xAB'", + out: &MyMessage{ + Count: Int32(42), + Name: String("\303\277\302\201\xAB"), + }, + }, + + // Bad quoted string + { + in: `inner: < host: "\0" >` + "\n", + err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, + }, + + // Number too large for int64 + { + in: "count: 1 others { key: 123456789012345678901 }", + err: "line 1.23: invalid int64: 123456789012345678901", + }, + + // Number too large for int32 + { + in: "count: 1234567890123", + err: "line 1.7: invalid int32: 1234567890123", + }, + + // Number in hexadecimal + { + in: "count: 0x2beef", + out: &MyMessage{ + Count: Int32(0x2beef), + }, + }, + + // Number in octal + { + in: "count: 024601", + out: &MyMessage{ + Count: Int32(024601), + }, + }, + + // Floating point number with "f" suffix + { + in: "count: 4 others:< weight: 17.0f >", + out: &MyMessage{ + Count: Int32(4), + Others: []*OtherMessage{ + { + Weight: Float32(17), + }, + }, + }, + }, + + // Floating point positive infinity + { + in: "count: 4 bigfloat: inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(1)), + }, + }, + + // Floating point negative infinity + { + in: "count: 4 bigfloat: -inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(-1)), + }, + }, + + // Number too large for float32 + { + in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", + err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", + }, + + // Number posing as a quoted string + { + in: `inner: < host: 12 >` + "\n", + err: `line 1.15: invalid string: 12`, + }, + + // Quoted string posing as int32 + { + in: `count: "12"`, + err: `line 1.7: invalid int32: "12"`, + }, + + // Quoted string posing a float32 + { + in: `others:< weight: "17.4" >`, + err: `line 1.17: invalid float32: "17.4"`, + }, + + // Enum + { + in: `count:42 bikeshed: BLUE`, + out: &MyMessage{ + Count: Int32(42), + Bikeshed: MyMessage_BLUE.Enum(), + }, + }, + + // Repeated field + { + in: `count:42 pet: "horsey" pet:"bunny"`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated field with list notation + { + in: `count:42 pet: ["horsey", "bunny"]`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated message with/without colon and <>/{} + { + in: `count:42 others:{} others{} others:<> others:{}`, + 
out: &MyMessage{ + Count: Int32(42), + Others: []*OtherMessage{ + {}, + {}, + {}, + {}, + }, + }, + }, + + // Missing colon for inner message + { + in: `count:42 inner < host: "cauchy.syd" >`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("cauchy.syd"), + }, + }, + }, + + // Missing colon for string field + { + in: `name "Dave"`, + err: `line 1.5: expected ':', found "\"Dave\""`, + }, + + // Missing colon for int32 field + { + in: `count 42`, + err: `line 1.6: expected ':', found "42"`, + }, + + // Missing required field + { + in: `name: "Pawel"`, + err: `proto: required field "testdata.MyMessage.count" not set`, + out: &MyMessage{ + Name: String("Pawel"), + }, + }, + + // Missing required field in a required submessage + { + in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`, + err: `proto: required field "testdata.InnerMessage.host" not set`, + out: &MyMessage{ + Count: Int32(42), + WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}}, + }, + }, + + // Repeated non-repeated field + { + in: `name: "Rob" name: "Russ"`, + err: `line 1.12: non-repeated field "name" was repeated`, + }, + + // Group + { + in: `count: 17 SomeGroup { group_field: 12 }`, + out: &MyMessage{ + Count: Int32(17), + Somegroup: &MyMessage_SomeGroup{ + GroupField: Int32(12), + }, + }, + }, + + // Semicolon between fields + { + in: `count:3;name:"Calvin"`, + out: &MyMessage{ + Count: Int32(3), + Name: String("Calvin"), + }, + }, + // Comma between fields + { + in: `count:4,name:"Ezekiel"`, + out: &MyMessage{ + Count: Int32(4), + Name: String("Ezekiel"), + }, + }, + + // Extension + buildExtStructTest(`count: 42 [testdata.Ext.more]:`), + buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), + buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), + buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), + + // Big all-in-one + { + in: "count:42 # Meaning\n" + + `name:"Dave" ` + + `quote:"\"I didn't want to go.\"" ` + + `pet:"bunny" ` + + `pet:"kitty" ` + + `pet:"horsey" ` + + `inner:<` + + ` host:"footrest.syd" ` + + ` port:7001 ` + + ` connected:true ` + + `> ` + + `others:<` + + ` key:3735928559 ` + + ` value:"\x01A\a\f" ` + + `> ` + + `others:<` + + " weight:58.9 # Atomic weight of Co\n" + + ` inner:<` + + ` host:"lesha.mtv" ` + + ` port:8002 ` + + ` >` + + `>`, + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + Quote: String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &InnerMessage{ + Host: String("footrest.syd"), + Port: Int32(7001), + Connected: Bool(true), + }, + Others: []*OtherMessage{ + { + Key: Int64(3735928559), + Value: []byte{0x1, 'A', '\a', '\f'}, + }, + { + Weight: Float32(58.9), + Inner: &InnerMessage{ + Host: String("lesha.mtv"), + Port: Int32(8002), + }, + }, + }, + }, + }, +} + +func TestUnmarshalText(t *testing.T) { + for i, test := range unMarshalTextTests { + pb := new(MyMessage) + err := UnmarshalText(test.in, pb) + if test.err == "" { + // We don't expect failure. + if err != nil { + t.Errorf("Test %d: Unexpected error: %v", i, err) + } else if !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } else { + // We do expect failure. 
+ if err == nil { + t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) + } else if err.Error() != test.err { + t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", + i, err.Error(), test.err) + } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } + } +} + +func TestUnmarshalTextCustomMessage(t *testing.T) { + msg := &textMessage{} + if err := UnmarshalText("custom", msg); err != nil { + t.Errorf("Unexpected error from custom unmarshal: %v", err) + } + if UnmarshalText("not custom", msg) == nil { + t.Errorf("Didn't get expected error from custom unmarshal") + } +} + +// Regression test; this caused a panic. +func TestRepeatedEnum(t *testing.T) { + pb := new(RepeatedEnum) + if err := UnmarshalText("color: RED", pb); err != nil { + t.Fatal(err) + } + exp := &RepeatedEnum{ + Color: []RepeatedEnum_Color{RepeatedEnum_RED}, + } + if !Equal(pb, exp) { + t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) + } +} + +func TestProto3TextParsing(t *testing.T) { + m := new(proto3pb.Message) + const in = `name: "Wallace" true_scotsman: true` + want := &proto3pb.Message{ + Name: "Wallace", + TrueScotsman: true, + } + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + +func TestMapParsing(t *testing.T) { + m := new(MessageWithMap) + const in = `name_mapping: name_mapping:` + + `msg_mapping:,>` + // separating commas are okay + `msg_mapping>` + // no colon after "value" + `msg_mapping:>` + // omitted key + `msg_mapping:` + // omitted value + `byte_mapping:` + + `byte_mapping:<>` // omitted key and value + want := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Beatles", + 1234: "Feist", + }, + MsgMapping: map[int64]*FloatingPoint{ + -4: {F: Float64(2.0)}, + -2: {F: Float64(4.0)}, + 0: {F: Float64(5.0)}, + 1: nil, + }, + ByteMapping: map[bool][]byte{ + false: nil, + true: []byte("so be it"), + }, + } + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + +func TestOneofParsing(t *testing.T) { + const in = `name:"Shrek"` + m := new(Communique) + want := &Communique{Union: &Communique_Name{"Shrek"}} + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + +var benchInput string + +func init() { + benchInput = "count: 4\n" + for i := 0; i < 1000; i++ { + benchInput += "pet: \"fido\"\n" + } + + // Check it is valid input. + pb := new(MyMessage) + err := UnmarshalText(benchInput, pb) + if err != nil { + panic("Bad benchmark input: " + err.Error()) + } +} + +func BenchmarkUnmarshalText(b *testing.B) { + pb := new(MyMessage) + for i := 0; i < b.N; i++ { + UnmarshalText(benchInput, pb) + } + b.SetBytes(int64(len(benchInput))) +} diff --git a/vendor/github.com/golang/protobuf/proto/text_test.go b/vendor/github.com/golang/protobuf/proto/text_test.go new file mode 100644 index 00000000000..3eabacac8f9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_test.go @@ -0,0 +1,474 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +// textMessage implements the methods that allow it to marshal and unmarshal +// itself as text. +type textMessage struct { +} + +func (*textMessage) MarshalText() ([]byte, error) { + return []byte("custom"), nil +} + +func (*textMessage) UnmarshalText(bytes []byte) error { + if string(bytes) != "custom" { + return errors.New("expected 'custom'") + } + return nil +} + +func (*textMessage) Reset() {} +func (*textMessage) String() string { return "" } +func (*textMessage) ProtoMessage() {} + +func newTestMessage() *pb.MyMessage { + msg := &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Quote: proto.String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("footrest.syd"), + Port: proto.Int32(7001), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(0xdeadbeef), + Value: []byte{1, 65, 7, 12}, + }, + { + Weight: proto.Float32(6.022), + Inner: &pb.InnerMessage{ + Host: proto.String("lesha.mtv"), + Port: proto.Int32(8002), + }, + }, + }, + Bikeshed: pb.MyMessage_BLUE.Enum(), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(8), + }, + // One normally wouldn't do this. + // This is an undeclared tag 13, as a varint (wire type 0) with value 4. + XXX_unrecognized: []byte{13<<3 | 0, 4}, + } + ext := &pb.Ext{ + Data: proto.String("Big gobs for big rats"), + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { + panic(err) + } + greetings := []string{"adg", "easy", "cow"} + if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { + panic(err) + } + + // Add an unknown extension. We marshal a pb.Ext, and fake the ID. 
+ b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) + if err != nil { + panic(err) + } + b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) + proto.SetRawExtension(msg, 201, b) + + // Extensions can be plain fields, too, so let's test that. + b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) + proto.SetRawExtension(msg, 202, b) + + return msg +} + +const text = `count: 42 +name: "Dave" +quote: "\"I didn't want to go.\"" +pet: "bunny" +pet: "kitty" +pet: "horsey" +inner: < + host: "footrest.syd" + port: 7001 + connected: true +> +others: < + key: 3735928559 + value: "\001A\007\014" +> +others: < + weight: 6.022 + inner: < + host: "lesha.mtv" + port: 8002 + > +> +bikeshed: BLUE +SomeGroup { + group_field: 8 +} +/* 2 unknown bytes */ +13: 4 +[testdata.Ext.more]: < + data: "Big gobs for big rats" +> +[testdata.greeting]: "adg" +[testdata.greeting]: "easy" +[testdata.greeting]: "cow" +/* 13 unknown bytes */ +201: "\t3G skiing" +/* 3 unknown bytes */ +202: 19 +` + +func TestMarshalText(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, newTestMessage()); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } + s := buf.String() + if s != text { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) + } +} + +func TestMarshalTextCustomMessage(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, &textMessage{}); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } + s := buf.String() + if s != "custom" { + t.Errorf("Got %q, expected %q", s, "custom") + } +} +func TestMarshalTextNil(t *testing.T) { + want := "" + tests := []proto.Message{nil, (*pb.MyMessage)(nil)} + for i, test := range tests { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, test); err != nil { + t.Fatal(err) + } + if got := buf.String(); got != want { + t.Errorf("%d: got %q want %q", i, got, want) + } + } +} + +func TestMarshalTextUnknownEnum(t *testing.T) { + // The Color enum only specifies values 0-2. 
+ m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} + got := m.String() + const want = `bikeshed:3 ` + if got != want { + t.Errorf("\n got %q\nwant %q", got, want) + } +} + +func TestTextOneof(t *testing.T) { + tests := []struct { + m proto.Message + want string + }{ + // zero message + {&pb.Communique{}, ``}, + // scalar field + {&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`}, + // message field + {&pb.Communique{Union: &pb.Communique_Msg{ + &pb.Strings{StringField: proto.String("why hello!")}, + }}, `msg:`}, + // bad oneof (should not panic) + {&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`}, + } + for _, test := range tests { + got := strings.TrimSpace(test.m.String()) + if got != test.want { + t.Errorf("\n got %s\nwant %s", got, test.want) + } + } +} + +func BenchmarkMarshalTextBuffered(b *testing.B) { + buf := new(bytes.Buffer) + m := newTestMessage() + for i := 0; i < b.N; i++ { + buf.Reset() + proto.MarshalText(buf, m) + } +} + +func BenchmarkMarshalTextUnbuffered(b *testing.B) { + w := ioutil.Discard + m := newTestMessage() + for i := 0; i < b.N; i++ { + proto.MarshalText(w, m) + } +} + +func compact(src string) string { + // s/[ \n]+/ /g; s/ $//; + dst := make([]byte, len(src)) + space, comment := false, false + j := 0 + for i := 0; i < len(src); i++ { + if strings.HasPrefix(src[i:], "/*") { + comment = true + i++ + continue + } + if comment && strings.HasPrefix(src[i:], "*/") { + comment = false + i++ + continue + } + if comment { + continue + } + c := src[i] + if c == ' ' || c == '\n' { + space = true + continue + } + if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { + space = false + } + if c == '{' { + space = false + } + if space { + dst[j] = ' ' + j++ + space = false + } + dst[j] = c + j++ + } + if space { + dst[j] = ' ' + j++ + } + return string(dst[0:j]) +} + +var compactText = compact(text) + +func TestCompactText(t *testing.T) { + s := proto.CompactTextString(newTestMessage()) + if s != compactText { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) + } +} + +func TestStringEscaping(t *testing.T) { + testCases := []struct { + in *pb.Strings + out string + }{ + { + // Test data from C++ test (TextFormatTest.StringEscape). + // Single divergence: we don't escape apostrophes. + &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, + "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", + }, + { + // Test data from the same C++ test. + &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, + "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", + }, + { + // Some UTF-8. + &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, + `string_field: "\000\001\377\201"` + "\n", + }, + } + + for i, tc := range testCases { + var buf bytes.Buffer + if err := proto.MarshalText(&buf, tc.in); err != nil { + t.Errorf("proto.MarsalText: %v", err) + continue + } + s := buf.String() + if s != tc.out { + t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) + continue + } + + // Check round-trip. + pb := new(pb.Strings) + if err := proto.UnmarshalText(s, pb); err != nil { + t.Errorf("#%d: UnmarshalText: %v", i, err) + continue + } + if !proto.Equal(pb, tc.in) { + t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) + } + } +} + +// A limitedWriter accepts some output before it fails. 
+// This is a proxy for something like a nearly-full or imminently-failing disk, +// or a network connection that is about to die. +type limitedWriter struct { + b bytes.Buffer + limit int +} + +var outOfSpace = errors.New("proto: insufficient space") + +func (w *limitedWriter) Write(p []byte) (n int, err error) { + var avail = w.limit - w.b.Len() + if avail <= 0 { + return 0, outOfSpace + } + if len(p) <= avail { + return w.b.Write(p) + } + n, _ = w.b.Write(p[:avail]) + return n, outOfSpace +} + +func TestMarshalTextFailing(t *testing.T) { + // Try lots of different sizes to exercise more error code-paths. + for lim := 0; lim < len(text); lim++ { + buf := new(limitedWriter) + buf.limit = lim + err := proto.MarshalText(buf, newTestMessage()) + // We expect a certain error, but also some partial results in the buffer. + if err != outOfSpace { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) + } + s := buf.b.String() + x := text[:buf.limit] + if s != x { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) + } + } +} + +func TestFloats(t *testing.T) { + tests := []struct { + f float64 + want string + }{ + {0, "0"}, + {4.7, "4.7"}, + {math.Inf(1), "inf"}, + {math.Inf(-1), "-inf"}, + {math.NaN(), "nan"}, + } + for _, test := range tests { + msg := &pb.FloatingPoint{F: &test.f} + got := strings.TrimSpace(msg.String()) + want := `f:` + test.want + if got != want { + t.Errorf("f=%f: got %q, want %q", test.f, got, want) + } + } +} + +func TestRepeatedNilText(t *testing.T) { + m := &pb.MessageList{ + Message: []*pb.MessageList_Message{ + nil, + &pb.MessageList_Message{ + Name: proto.String("Horse"), + }, + nil, + }, + } + want := `Message +Message { + name: "Horse" +} +Message +` + if s := proto.MarshalTextString(m); s != want { + t.Errorf(" got: %s\nwant: %s", s, want) + } +} + +func TestProto3Text(t *testing.T) { + tests := []struct { + m proto.Message + want string + }{ + // zero message + {&proto3pb.Message{}, ``}, + // zero message except for an empty byte slice + {&proto3pb.Message{Data: []byte{}}, ``}, + // trivial case + {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`}, + // empty map + {&pb.MessageWithMap{}, ``}, + // non-empty map; map format is the same as a repeated struct, + // and they are sorted by key (numerically for numeric keys). 
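Concretely, a populated map prints as one name_mapping:<key:... value:... > entry per element, ordered by key, which is the shape the cases that follow check for. A small sketch of the compact form (the exact spacing shown in the comment is illustrative):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	m := &pb.MessageWithMap{NameMapping: map[int32]string{
		7:  "Lucky",
		-1: "Negatory",
	}}
	fmt.Println(proto.CompactTextString(m))
	// One entry per element, sorted numerically by key, along the lines of:
	// name_mapping:<key:-1 value:"Negatory" > name_mapping:<key:7 value:"Lucky" >
}
```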
+		{
+			&pb.MessageWithMap{NameMapping: map[int32]string{
+				-1:      "Negatory",
+				7:       "Lucky",
+				1234:    "Feist",
+				6345789: "Otis",
+			}},
+			`name_mapping:<key:-1 value:"Negatory" > ` +
+				`name_mapping:<key:7 value:"Lucky" > ` +
+				`name_mapping:<key:1234 value:"Feist" > ` +
+				`name_mapping:<key:6345789 value:"Otis" >`,
+		},
+		// map with nil value; not well-defined, but we shouldn't crash
+		{
+			&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
+			`msg_mapping:<key:7 >`,
+		},
+	}
+	for _, test := range tests {
+		got := strings.TrimSpace(test.m.String())
+		if got != test.want {
+			t.Errorf("\n got %s\nwant %s", got, test.want)
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
index ae911fdebb0..4942418e32b 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
@@ -37,9 +37,3 @@ regenerate:
 	sed 's,^package google_protobuf,package descriptor,' google/protobuf/descriptor.pb.go > \
 		$(GOPATH)/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go && \
 	rm -f google/protobuf/descriptor.pb.go
-
-restore:
-	cp descriptor.pb.golden descriptor.pb.go
-
-preserve:
-	cp descriptor.pb.go descriptor.pb.golden
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
index f36533d4b4d..2e06cb3a9e6 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -3,7 +3,7 @@
 // DO NOT EDIT!
 
 /*
-Package google_protobuf is a generated protocol buffer package.
+Package descriptor is a generated protocol buffer package.
 
 It is generated from these files:
 	google/protobuf/descriptor.proto
@@ -21,12 +21,14 @@ It has these top-level messages:
 	FileOptions
 	MessageOptions
 	FieldOptions
+	OneofOptions
 	EnumOptions
 	EnumValueOptions
 	ServiceOptions
 	MethodOptions
 	UninterpretedOption
 	SourceCodeInfo
+	GeneratedCodeInfo
 */
 
 package descriptor
@@ -39,6 +41,12 @@ var _ = proto.Marshal
 var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
 type FieldDescriptorProto_Type int32
 
 const (
@@ -126,6 +134,7 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
 	*x = FieldDescriptorProto_Type(value)
 	return nil
 }
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
 
 type FieldDescriptorProto_Label int32
 
@@ -163,6 +172,9 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
 	*x = FieldDescriptorProto_Label(value)
 	return nil
 }
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor0, []int{3, 1}
+}
 
 // Generated classes can be optimized for speed or code size.
type FileOptions_OptimizeMode int32 @@ -201,6 +213,7 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { *x = FileOptions_OptimizeMode(value) return nil } +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } type FieldOptions_CType int32 @@ -238,6 +251,47 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { *x = FieldOptions_CType(value) return nil } +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} } + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 1} } // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. @@ -246,9 +300,10 @@ type FileDescriptorSet struct { XXX_unrecognized []byte `json:"-"` } -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { if m != nil { @@ -264,13 +319,13 @@ type FileDescriptorProto struct { // Names of files imported by this file. Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency" json:"public_dependency,omitempty"` + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` // Indexes of the weak imported files in the dependency list. // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency" json:"weak_dependency,omitempty"` + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` // All top-level definitions in this file. 
- MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type" json:"enum_type,omitempty"` + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` @@ -278,16 +333,17 @@ type FileDescriptorProto struct { // You may safely remove this entire field without harming runtime // functionality of the descriptors -- the information is needed only by // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info" json:"source_code_info,omitempty"` + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. // The supported values are "proto2" and "proto3". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *FileDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -375,20 +431,25 @@ func (m *FileDescriptorProto) GetSyntax() string { // Describes a message type. 
type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` } -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *DescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -446,6 +507,20 @@ func (m *DescriptorProto) GetOptions() *MessageOptions { return nil } +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + type DescriptorProto_ExtensionRange struct { Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` @@ -455,6 +530,9 @@ type DescriptorProto_ExtensionRange struct { func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 0} +} func (m *DescriptorProto_ExtensionRange) GetStart() int32 { if m != nil && m.Start != nil { @@ -470,6 +548,36 @@ func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { return 0 } +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + // Describes a field within a message. type FieldDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -483,7 +591,7 @@ type FieldDescriptorProto struct { // rules are used to find the type (i.e. first the nested types within this // message are searched, then within the parent, on up to the root // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name" json:"type_name,omitempty"` + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` // For extensions, this is the name of the type being extended. It is // resolved in the same manner as type_name. 
Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` @@ -492,19 +600,23 @@ type FieldDescriptorProto struct { // For strings, contains the default text contents (not escaped in any way). // For bytes, contains the C escaped value. All bytes >= 128 are escaped. // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value" json:"default_value,omitempty"` + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. Extensions of a oneof should - // not set this since the oneof to which they belong will be inferred based - // on the extension range containing the extension's field number. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index" json:"oneof_index,omitempty"` + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -562,6 +674,13 @@ func (m *FieldDescriptorProto) GetOneofIndex() int32 { return 0 } +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + func (m *FieldDescriptorProto) GetOptions() *FieldOptions { if m != nil { return m.Options @@ -571,13 +690,15 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. 
type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` } -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -586,6 +707,13 @@ func (m *OneofDescriptorProto) GetName() string { return "" } +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + // Describes an enum type. type EnumDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -594,9 +722,10 @@ type EnumDescriptorProto struct { XXX_unrecognized []byte `json:"-"` } -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -627,9 +756,10 @@ type EnumValueDescriptorProto struct { XXX_unrecognized []byte `json:"-"` } -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *EnumValueDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -660,9 +790,10 @@ type ServiceDescriptorProto struct { XXX_unrecognized []byte `json:"-"` } -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -690,19 +821,20 @@ type MethodDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Input and output type names. 
These are resolved in the same way as // FieldDescriptorProto.type_name, but must refer to a message type. - InputType *string `protobuf:"bytes,2,opt,name=input_type" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type" json:"output_type,omitempty"` + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,def=0" json:"client_streaming,omitempty"` + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,def=0" json:"server_streaming,omitempty"` + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -754,23 +886,25 @@ type FileOptions struct { // placed. By default, the proto package is used, but this is often // inappropriate because proto packages do not normally start with backwards // domain names. - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package" json:"java_package,omitempty"` + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` // If set, all the classes from the .proto file are wrapped in a single // outer class with the given name. This applies to both Proto1 // (equivalent to the old "--one_java_file" option) and Proto2 (where // a .proto always translates to a single class, but you may want to // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname" json:"java_outer_classname,omitempty"` + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` // If set true, then the Java code generator will generate a separate .java // file for each top-level message, enum, and service defined in the .proto // file. Thus, these types will *not* be nested inside the outer class // named by java_outer_classname. However, the outer class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. 
- JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,def=0" json:"java_multiple_files,omitempty"` + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // If set true, then the Java code generator will generate equals() and // hashCode() methods for all messages defined in the .proto file. - // - In the full runtime, this is purely a speed optimization, as the + // This increases generated code size, potentially substantially for large + // protos, which may harm a memory-constrained application. + // - In the full runtime this is a speed optimization, as the // AbstractMessage base class includes reflection-based implementations of // these methods. // - In the lite runtime, setting this option changes the semantics of @@ -778,21 +912,21 @@ type FileOptions struct { // the generated methods compute their results based on field values rather // than object identity. (Implementations should not assume that hashcodes // will be consistent across runtimes or versions of the protocol compiler.) - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,def=0" json:"java_generate_equals_and_hash,omitempty"` + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. // Message reflection will do the same. // However, an extension field still accepts non-UTF-8 byte sequences. // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` // Sets the Go package where structs generated from this .proto will be // placed. If omitted, the Go package will be derived from the following: // - The basename of the package import path, if provided. // - Otherwise, the package statement in the .proto file, if present. // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package" json:"go_package,omitempty"` + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` // Should generic services be generated in each language? "Generic" services // are not specific to any particular RPC system. They are generated by the // main code generators in each language (without additional plugins). @@ -803,9 +937,9 @@ type FileOptions struct { // that generate code specific to your particular RPC system. Therefore, // these default to false. Old code which depends on generic services should // explicitly set them to true. 
- CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,def=0" json:"py_generic_services,omitempty"` + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -813,16 +947,22 @@ type FileOptions struct { Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // Enables the use of arenas for the proto messages in this file. This applies // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,def=0" json:"cc_enable_arenas,omitempty"` + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` } -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } var extRange_FileOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -831,12 +971,6 @@ var extRange_FileOptions = []proto.ExtensionRange{ func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FileOptions } -func (m *FileOptions) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} const Default_FileOptions_JavaMultipleFiles bool = false const Default_FileOptions_JavaGenerateEqualsAndHash bool = false @@ -932,6 +1066,20 @@ func (m *FileOptions) GetCcEnableArenas() bool { return Default_FileOptions_CcEnableArenas } +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { return m.UninterpretedOption @@ -958,11 +1106,11 @@ type MessageOptions struct { // // Because this is an option, the above two restrictions are not enforced by // the protocol compiler. - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,def=0" json:"message_set_wire_format,omitempty"` + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` // Disables the generation of the standard "descriptor()" accessor, which can // conflict with a field of the same name. This is meant to make migration // from proto1 easier; new code should avoid fields named "descriptor". - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` // Is this message deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the message, or it will be completely ignored; in the very least, @@ -989,16 +1137,17 @@ type MessageOptions struct { // NOTE: Do not set the option in .proto files. Always use the maps syntax // instead. The option should only be implicitly set by the proto compiler // parser. 
- MapEntry *bool `protobuf:"varint,7,opt,name=map_entry" json:"map_entry,omitempty"` + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` } -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } var extRange_MessageOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1007,12 +1156,6 @@ var extRange_MessageOptions = []proto.ExtensionRange{ func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MessageOptions } -func (m *MessageOptions) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} const Default_MessageOptions_MessageSetWireFormat bool = false const Default_MessageOptions_NoStandardDescriptorAccessor bool = false @@ -1062,8 +1205,19 @@ type FieldOptions struct { // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). By default these types are + // represented as JavaScript strings. This avoids loss of precision that can + // happen when a large value is converted to a floating point JavaScript + // numbers. Specifying JS_NUMBER for the jstype causes the generated + // JavaScript code to use the JavaScript "number" type instead of strings. + // This option is an enum to permit additional types to be added, + // e.g. goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` // Should this field be parsed lazily? Lazy applies only to message-type // fields. It means that when the outer message is initially parsed, the // inner message's contents will not be parsed but instead stored in encoded @@ -1101,14 +1255,15 @@ type FieldOptions struct { // For Google-internal migration only. Do not use. Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` } -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } var extRange_FieldOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1117,14 +1272,9 @@ var extRange_FieldOptions = []proto.ExtensionRange{ func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FieldOptions } -func (m *FieldOptions) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL const Default_FieldOptions_Lazy bool = false const Default_FieldOptions_Deprecated bool = false const Default_FieldOptions_Weak bool = false @@ -1143,6 +1293,13 @@ func (m *FieldOptions) GetPacked() bool { return false } +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + func (m *FieldOptions) GetLazy() bool { if m != nil && m.Lazy != nil { return *m.Lazy @@ -1171,24 +1328,52 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { return nil } +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + type EnumOptions struct { // Set this option to true to allow mapping different tag names to the same // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias" json:"allow_alias,omitempty"` + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` // Is this enum deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. 
Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` } -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } var extRange_EnumOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1197,12 +1382,6 @@ var extRange_EnumOptions = []proto.ExtensionRange{ func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumOptions } -func (m *EnumOptions) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} const Default_EnumOptions_Deprecated bool = false @@ -1234,14 +1413,15 @@ type EnumValueOptions struct { // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` } -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } var extRange_EnumValueOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1250,12 +1430,6 @@ var extRange_EnumValueOptions = []proto.ExtensionRange{ func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumValueOptions } -func (m *EnumValueOptions) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} const Default_EnumValueOptions_Deprecated bool = false @@ -1280,14 +1454,15 @@ type ServiceOptions struct { // this is a formalization for deprecating services. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` } -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } var extRange_ServiceOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1296,12 +1471,6 @@ var extRange_ServiceOptions = []proto.ExtensionRange{ func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ServiceOptions } -func (m *ServiceOptions) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} const Default_ServiceOptions_Deprecated bool = false @@ -1326,14 +1495,15 @@ type MethodOptions struct { // this is a formalization for deprecating methods. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` } -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } var extRange_MethodOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1342,12 +1512,6 @@ var extRange_MethodOptions = []proto.ExtensionRange{ func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MethodOptions } -func (m *MethodOptions) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} const Default_MethodOptions_Deprecated bool = false @@ -1375,18 +1539,19 @@ type UninterpretedOption struct { Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value" json:"aggregate_value,omitempty"` + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1443,14 +1608,17 @@ func (m *UninterpretedOption) GetAggregateValue() string { // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents // "foo.(bar.baz).qux". 
type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension" json:"is_extension,omitempty"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{17, 0} +} func (m *UninterpretedOption_NamePart) GetNamePart() string { if m != nil && m.NamePart != nil { @@ -1516,9 +1684,10 @@ type SourceCodeInfo struct { XXX_unrecognized []byte `json:"-"` } -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1565,6 +1734,11 @@ type SourceCodeInfo_Location struct { // A series of line comments appearing on consecutive lines, with no other // tokens appearing on those lines, will be treated as a single comment. // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // // Only the comment content is provided; comment markers (e.g. //) are // stripped out. For block comments, leading whitespace and an asterisk // will be stripped from the beginning of each line other than the first. @@ -1585,6 +1759,12 @@ type SourceCodeInfo_Location struct { // // Another line attached to qux. // optional double qux = 4; // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // // optional string corge = 5; // /* Block comment attached // * to corge. Leading asterisks @@ -1592,14 +1772,18 @@ type SourceCodeInfo_Location struct { // /* Block comment attached to // * grault. */ // optional int32 grault = 6; - LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments" json:"trailing_comments,omitempty"` - XXX_unrecognized []byte `json:"-"` + // + // // ignored detached comments. 
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` } -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} } func (m *SourceCodeInfo_Location) GetPath() []int32 { if m != nil { @@ -1629,9 +1813,264 @@ func (m *SourceCodeInfo_Location) GetTrailingComments() string { return "" } +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{19, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", 
FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2287 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x73, 0xdb, 0xc6, + 0x15, 0x0f, 0xf8, 0x25, 0xf2, 0x91, 0xa2, 0x56, 0x2b, 0xc5, 0x86, 0xe5, 0x38, 0x96, 0x19, 0x3b, + 0x96, 0xed, 0x96, 0xce, 0xc8, 0x1f, 0x71, 0x94, 0x4e, 0x3a, 0x94, 0x08, 0x2b, 0xf4, 0x50, 0x22, + 0x0b, 0x4a, 0xad, 0x93, 0x1e, 0x30, 0x2b, 0x60, 0x49, 0xc1, 0x06, 0x17, 0x28, 0x00, 0xda, 0x56, + 0x4e, 0x9e, 0xe9, 0xa9, 0xc7, 0xde, 0x3a, 0x6d, 0xa7, 0xd3, 0xc9, 0x25, 0x33, 0xfd, 0x03, 0x7a, + 0xe8, 0xbd, 0xd7, 0xce, 0xf4, 0xde, 0x63, 0x67, 0xda, 0xff, 0xa0, 0xd7, 0xce, 0xee, 0x02, 0x20, + 0xf8, 0x15, 0xab, 0x99, 0x49, 0xd2, 0x93, 0xb8, 0xbf, 0xf7, 0x7b, 0x0f, 0x6f, 0xdf, 0x3e, 0xbc, + 0xf7, 0xb0, 0x82, 0xcd, 0x81, 0xeb, 0x0e, 0x1c, 0x7a, 0xd7, 0xf3, 0xdd, 0xd0, 0x3d, 0x19, 0xf5, + 0xef, 0x5a, 0x34, 0x30, 0x7d, 0xdb, 0x0b, 0x5d, 0xbf, 0x2e, 0x30, 0xbc, 0x22, 0x19, 0xf5, 0x98, + 0x51, 0x3b, 0x80, 0xd5, 0xc7, 0xb6, 0x43, 0x9b, 0x09, 0xb1, 0x47, 0x43, 0xfc, 0x08, 0x72, 0x7d, + 0xdb, 0xa1, 0xaa, 0xb2, 0x99, 0xdd, 0x2a, 0x6f, 0x5f, 0xaf, 0x4f, 0x29, 0xd5, 0x27, 0x35, 0xba, + 0x1c, 0xd6, 0x85, 0x46, 0xed, 0x9f, 0x39, 0x58, 0x9b, 0x23, 0xc5, 0x18, 0x72, 0x8c, 0x0c, 0xb9, + 0x45, 0x65, 0xab, 0xa4, 0x8b, 0xdf, 0x58, 0x85, 0x25, 0x8f, 0x98, 0xcf, 0xc9, 0x80, 0xaa, 0x19, + 0x01, 0xc7, 0x4b, 0xfc, 0x2e, 0x80, 0x45, 0x3d, 0xca, 0x2c, 0xca, 0xcc, 0x33, 0x35, 0xbb, 0x99, + 0xdd, 0x2a, 0xe9, 0x29, 0x04, 0xdf, 0x81, 0x55, 0x6f, 0x74, 0xe2, 0xd8, 0xa6, 0x91, 0xa2, 0xc1, + 0x66, 0x76, 0x2b, 0xaf, 0x23, 0x29, 0x68, 0x8e, 0xc9, 0x37, 0x61, 0xe5, 0x25, 0x25, 0xcf, 0xd3, + 0xd4, 0xb2, 0xa0, 0x56, 0x39, 0x9c, 0x22, 0xee, 0x41, 0x65, 0x48, 0x83, 0x80, 0x0c, 0xa8, 0x11, + 0x9e, 0x79, 0x54, 0xcd, 0x89, 0xdd, 0x6f, 0xce, 0xec, 0x7e, 0x7a, 0xe7, 0xe5, 0x48, 0xeb, 0xe8, + 0xcc, 0xa3, 0xb8, 0x01, 0x25, 0xca, 0x46, 0x43, 0x69, 0x21, 0xbf, 0x20, 0x7e, 0x1a, 0x1b, 0x0d, + 0xa7, 0xad, 0x14, 0xb9, 0x5a, 0x64, 0x62, 0x29, 0xa0, 0xfe, 0x0b, 0xdb, 0xa4, 0x6a, 0x41, 0x18, + 0xb8, 0x39, 0x63, 0xa0, 0x27, 0xe5, 0xd3, 0x36, 0x62, 0x3d, 0xbc, 0x07, 0x25, 0xfa, 0x2a, 0xa4, + 0x2c, 0xb0, 0x5d, 0xa6, 0x2e, 0x09, 0x23, 0x37, 0xe6, 0x9c, 0x22, 0x75, 0xac, 0x69, 0x13, 0x63, + 0x3d, 0xfc, 0x10, 0x96, 0x5c, 0x2f, 0xb4, 0x5d, 0x16, 0xa8, 0xc5, 0x4d, 0x65, 0xab, 0xbc, 0xfd, + 0xce, 0xdc, 0x44, 0xe8, 0x48, 0x8e, 0x1e, 0x93, 0x71, 0x0b, 0x50, 0xe0, 0x8e, 0x7c, 0x93, 0x1a, + 0xa6, 0x6b, 0x51, 0xc3, 0x66, 0x7d, 0x57, 0x2d, 0x09, 0x03, 0x57, 0x67, 0x37, 0x22, 0x88, 0x7b, + 0xae, 0x45, 0x5b, 0xac, 0xef, 0xea, 0xd5, 0x60, 0x62, 0x8d, 0x2f, 0x40, 0x21, 0x38, 0x63, 0x21, + 0x79, 0xa5, 0x56, 0x44, 0x86, 0x44, 0xab, 0xda, 0x7f, 0xf2, 0xb0, 0x72, 0x9e, 0x14, 0xfb, 0x18, + 0xf2, 0x7d, 0xbe, 0x4b, 0x35, 0xf3, 0xbf, 0xc4, 0x40, 0xea, 0x4c, 0x06, 0xb1, 0xf0, 0x0d, 0x83, + 0xd8, 0x80, 0x32, 0xa3, 0x41, 0x48, 0x2d, 0x99, 0x11, 0xd9, 0x73, 0xe6, 0x14, 0x48, 0xa5, 0xd9, + 0x94, 0xca, 0x7d, 0xa3, 0x94, 0x7a, 0x0a, 0x2b, 0x89, 0x4b, 0x86, 0x4f, 0xd8, 0x20, 0xce, 0xcd, + 0xbb, 0x6f, 0xf2, 0xa4, 0xae, 0xc5, 0x7a, 0x3a, 0x57, 0xd3, 0xab, 0x74, 0x62, 0x8d, 0x9b, 0x00, + 0x2e, 0xa3, 0x6e, 0xdf, 0xb0, 0xa8, 0xe9, 0xa8, 0xc5, 
0x05, 0x51, 0xea, 0x70, 0xca, 0x4c, 0x94, + 0x5c, 0x89, 0x9a, 0x0e, 0xfe, 0x68, 0x9c, 0x6a, 0x4b, 0x0b, 0x32, 0xe5, 0x40, 0xbe, 0x64, 0x33, + 0xd9, 0x76, 0x0c, 0x55, 0x9f, 0xf2, 0xbc, 0xa7, 0x56, 0xb4, 0xb3, 0x92, 0x70, 0xa2, 0xfe, 0xc6, + 0x9d, 0xe9, 0x91, 0x9a, 0xdc, 0xd8, 0xb2, 0x9f, 0x5e, 0xe2, 0xf7, 0x20, 0x01, 0x0c, 0x91, 0x56, + 0x20, 0xaa, 0x50, 0x25, 0x06, 0x0f, 0xc9, 0x90, 0x6e, 0x3c, 0x82, 0xea, 0x64, 0x78, 0xf0, 0x3a, + 0xe4, 0x83, 0x90, 0xf8, 0xa1, 0xc8, 0xc2, 0xbc, 0x2e, 0x17, 0x18, 0x41, 0x96, 0x32, 0x4b, 0x54, + 0xb9, 0xbc, 0xce, 0x7f, 0x6e, 0x7c, 0x08, 0xcb, 0x13, 0x8f, 0x3f, 0xaf, 0x62, 0xed, 0x37, 0x05, + 0x58, 0x9f, 0x97, 0x73, 0x73, 0xd3, 0xff, 0x02, 0x14, 0xd8, 0x68, 0x78, 0x42, 0x7d, 0x35, 0x2b, + 0x2c, 0x44, 0x2b, 0xdc, 0x80, 0xbc, 0x43, 0x4e, 0xa8, 0xa3, 0xe6, 0x36, 0x95, 0xad, 0xea, 0xf6, + 0x9d, 0x73, 0x65, 0x75, 0xbd, 0xcd, 0x55, 0x74, 0xa9, 0x89, 0x3f, 0x81, 0x5c, 0x54, 0xe2, 0xb8, + 0x85, 0xdb, 0xe7, 0xb3, 0xc0, 0x73, 0x51, 0x17, 0x7a, 0xf8, 0x32, 0x94, 0xf8, 0x5f, 0x19, 0xdb, + 0x82, 0xf0, 0xb9, 0xc8, 0x01, 0x1e, 0x57, 0xbc, 0x01, 0x45, 0x91, 0x66, 0x16, 0x8d, 0x5b, 0x43, + 0xb2, 0xe6, 0x07, 0x63, 0xd1, 0x3e, 0x19, 0x39, 0xa1, 0xf1, 0x82, 0x38, 0x23, 0x2a, 0x12, 0xa6, + 0xa4, 0x57, 0x22, 0xf0, 0xa7, 0x1c, 0xc3, 0x57, 0xa1, 0x2c, 0xb3, 0xd2, 0x66, 0x16, 0x7d, 0x25, + 0xaa, 0x4f, 0x5e, 0x97, 0x89, 0xda, 0xe2, 0x08, 0x7f, 0xfc, 0xb3, 0xc0, 0x65, 0xf1, 0xd1, 0x8a, + 0x47, 0x70, 0x40, 0x3c, 0xfe, 0xc3, 0xe9, 0xc2, 0x77, 0x65, 0xfe, 0xf6, 0xa6, 0x73, 0xb1, 0xf6, + 0xe7, 0x0c, 0xe4, 0xc4, 0xfb, 0xb6, 0x02, 0xe5, 0xa3, 0xcf, 0xba, 0x9a, 0xd1, 0xec, 0x1c, 0xef, + 0xb6, 0x35, 0xa4, 0xe0, 0x2a, 0x80, 0x00, 0x1e, 0xb7, 0x3b, 0x8d, 0x23, 0x94, 0x49, 0xd6, 0xad, + 0xc3, 0xa3, 0x87, 0xf7, 0x51, 0x36, 0x51, 0x38, 0x96, 0x40, 0x2e, 0x4d, 0xb8, 0xb7, 0x8d, 0xf2, + 0x18, 0x41, 0x45, 0x1a, 0x68, 0x3d, 0xd5, 0x9a, 0x0f, 0xef, 0xa3, 0xc2, 0x24, 0x72, 0x6f, 0x1b, + 0x2d, 0xe1, 0x65, 0x28, 0x09, 0x64, 0xb7, 0xd3, 0x69, 0xa3, 0x62, 0x62, 0xb3, 0x77, 0xa4, 0xb7, + 0x0e, 0xf7, 0x51, 0x29, 0xb1, 0xb9, 0xaf, 0x77, 0x8e, 0xbb, 0x08, 0x12, 0x0b, 0x07, 0x5a, 0xaf, + 0xd7, 0xd8, 0xd7, 0x50, 0x39, 0x61, 0xec, 0x7e, 0x76, 0xa4, 0xf5, 0x50, 0x65, 0xc2, 0xad, 0x7b, + 0xdb, 0x68, 0x39, 0x79, 0x84, 0x76, 0x78, 0x7c, 0x80, 0xaa, 0x78, 0x15, 0x96, 0xe5, 0x23, 0x62, + 0x27, 0x56, 0xa6, 0xa0, 0x87, 0xf7, 0x11, 0x1a, 0x3b, 0x22, 0xad, 0xac, 0x4e, 0x00, 0x0f, 0xef, + 0x23, 0x5c, 0xdb, 0x83, 0xbc, 0xc8, 0x2e, 0x8c, 0xa1, 0xda, 0x6e, 0xec, 0x6a, 0x6d, 0xa3, 0xd3, + 0x3d, 0x6a, 0x75, 0x0e, 0x1b, 0x6d, 0xa4, 0x8c, 0x31, 0x5d, 0xfb, 0xc9, 0x71, 0x4b, 0xd7, 0x9a, + 0x28, 0x93, 0xc6, 0xba, 0x5a, 0xe3, 0x48, 0x6b, 0xa2, 0x6c, 0xcd, 0x84, 0xf5, 0x79, 0x75, 0x66, + 0xee, 0x9b, 0x91, 0x3a, 0xe2, 0xcc, 0x82, 0x23, 0x16, 0xb6, 0x66, 0x8e, 0xf8, 0x4b, 0x05, 0xd6, + 0xe6, 0xd4, 0xda, 0xb9, 0x0f, 0xf9, 0x31, 0xe4, 0x65, 0x8a, 0xca, 0xee, 0x73, 0x6b, 0x6e, 0xd1, + 0x16, 0x09, 0x3b, 0xd3, 0x81, 0x84, 0x5e, 0xba, 0x03, 0x67, 0x17, 0x74, 0x60, 0x6e, 0x62, 0xc6, + 0xc9, 0x5f, 0x2a, 0xa0, 0x2e, 0xb2, 0xfd, 0x86, 0x42, 0x91, 0x99, 0x28, 0x14, 0x1f, 0x4f, 0x3b, + 0x70, 0x6d, 0xf1, 0x1e, 0x66, 0xbc, 0xf8, 0x4a, 0x81, 0x0b, 0xf3, 0x07, 0x95, 0xb9, 0x3e, 0x7c, + 0x02, 0x85, 0x21, 0x0d, 0x4f, 0xdd, 0xb8, 0x59, 0xbf, 0x3f, 0xa7, 0x05, 0x70, 0xf1, 0x74, 0xac, + 0x22, 0xad, 0x74, 0x0f, 0xc9, 0x2e, 0x9a, 0x36, 0xa4, 0x37, 0x33, 0x9e, 0xfe, 0x2a, 0x03, 0x6f, + 0xcf, 0x35, 0x3e, 0xd7, 0xd1, 0x2b, 0x00, 0x36, 0xf3, 0x46, 0xa1, 0x6c, 0xc8, 0xb2, 0x3e, 0x95, + 0x04, 0x22, 0xde, 0x7d, 0x5e, 0x7b, 0x46, 0x61, 0x22, 0xcf, 0x0a, 0x39, 0x48, 
0x48, 0x10, 0x1e, + 0x8d, 0x1d, 0xcd, 0x09, 0x47, 0xdf, 0x5d, 0xb0, 0xd3, 0x99, 0x5e, 0xf7, 0x01, 0x20, 0xd3, 0xb1, + 0x29, 0x0b, 0x8d, 0x20, 0xf4, 0x29, 0x19, 0xda, 0x6c, 0x20, 0x0a, 0x70, 0x71, 0x27, 0xdf, 0x27, + 0x4e, 0x40, 0xf5, 0x15, 0x29, 0xee, 0xc5, 0x52, 0xae, 0x21, 0xba, 0x8c, 0x9f, 0xd2, 0x28, 0x4c, + 0x68, 0x48, 0x71, 0xa2, 0x51, 0xfb, 0xf5, 0x12, 0x94, 0x53, 0x63, 0x1d, 0xbe, 0x06, 0x95, 0x67, + 0xe4, 0x05, 0x31, 0xe2, 0x51, 0x5d, 0x46, 0xa2, 0xcc, 0xb1, 0x6e, 0x34, 0xae, 0x7f, 0x00, 0xeb, + 0x82, 0xe2, 0x8e, 0x42, 0xea, 0x1b, 0xa6, 0x43, 0x82, 0x40, 0x04, 0xad, 0x28, 0xa8, 0x98, 0xcb, + 0x3a, 0x5c, 0xb4, 0x17, 0x4b, 0xf0, 0x03, 0x58, 0x13, 0x1a, 0xc3, 0x91, 0x13, 0xda, 0x9e, 0x43, + 0x0d, 0xfe, 0xf1, 0x10, 0x88, 0x42, 0x9c, 0x78, 0xb6, 0xca, 0x19, 0x07, 0x11, 0x81, 0x7b, 0x14, + 0xe0, 0x7d, 0xb8, 0x22, 0xd4, 0x06, 0x94, 0x51, 0x9f, 0x84, 0xd4, 0xa0, 0xbf, 0x18, 0x11, 0x27, + 0x30, 0x08, 0xb3, 0x8c, 0x53, 0x12, 0x9c, 0xaa, 0xeb, 0x69, 0x03, 0x97, 0x38, 0x77, 0x3f, 0xa2, + 0x6a, 0x82, 0xd9, 0x60, 0xd6, 0xa7, 0x24, 0x38, 0xc5, 0x3b, 0x70, 0x41, 0x18, 0x0a, 0x42, 0xdf, + 0x66, 0x03, 0xc3, 0x3c, 0xa5, 0xe6, 0x73, 0x63, 0x14, 0xf6, 0x1f, 0xa9, 0x97, 0xd3, 0x16, 0x84, + 0x93, 0x3d, 0xc1, 0xd9, 0xe3, 0x94, 0xe3, 0xb0, 0xff, 0x08, 0xf7, 0xa0, 0xc2, 0xcf, 0x63, 0x68, + 0x7f, 0x41, 0x8d, 0xbe, 0xeb, 0x8b, 0xe6, 0x52, 0x9d, 0xf3, 0x72, 0xa7, 0x82, 0x58, 0xef, 0x44, + 0x0a, 0x07, 0xae, 0x45, 0x77, 0xf2, 0xbd, 0xae, 0xa6, 0x35, 0xf5, 0x72, 0x6c, 0xe5, 0xb1, 0xeb, + 0xf3, 0x9c, 0x1a, 0xb8, 0x49, 0x8c, 0xcb, 0x32, 0xa7, 0x06, 0x6e, 0x1c, 0xe1, 0x07, 0xb0, 0x66, + 0x9a, 0x72, 0xdb, 0xb6, 0x69, 0x44, 0x53, 0x7e, 0xa0, 0xa2, 0x89, 0x78, 0x99, 0xe6, 0xbe, 0x24, + 0x44, 0x69, 0x1e, 0xe0, 0x8f, 0xe0, 0xed, 0x71, 0xbc, 0xd2, 0x8a, 0xab, 0x33, 0xbb, 0x9c, 0x56, + 0x7d, 0x00, 0x6b, 0xde, 0xd9, 0xac, 0x22, 0x9e, 0x78, 0xa2, 0x77, 0x36, 0xad, 0x76, 0x43, 0x7c, + 0xb9, 0xf9, 0xd4, 0x24, 0x21, 0xb5, 0xd4, 0x8b, 0x69, 0x76, 0x4a, 0x80, 0xef, 0x02, 0x32, 0x4d, + 0x83, 0x32, 0x72, 0xe2, 0x50, 0x83, 0xf8, 0x94, 0x91, 0x40, 0xbd, 0x9a, 0x26, 0x57, 0x4d, 0x53, + 0x13, 0xd2, 0x86, 0x10, 0xe2, 0xdb, 0xb0, 0xea, 0x9e, 0x3c, 0x33, 0x65, 0x72, 0x19, 0x9e, 0x4f, + 0xfb, 0xf6, 0x2b, 0xf5, 0xba, 0x08, 0xd3, 0x0a, 0x17, 0x88, 0xd4, 0xea, 0x0a, 0x18, 0xdf, 0x02, + 0x64, 0x06, 0xa7, 0xc4, 0xf7, 0x44, 0x77, 0x0f, 0x3c, 0x62, 0x52, 0xf5, 0x86, 0xa4, 0x4a, 0xfc, + 0x30, 0x86, 0xf1, 0x53, 0x58, 0x1f, 0x31, 0x9b, 0x85, 0xd4, 0xf7, 0x7c, 0xca, 0x87, 0x74, 0xf9, + 0xa6, 0xa9, 0xff, 0x5a, 0x5a, 0x30, 0x66, 0x1f, 0xa7, 0xd9, 0xf2, 0x74, 0xf5, 0xb5, 0xd1, 0x2c, + 0x58, 0xdb, 0x81, 0x4a, 0xfa, 0xd0, 0x71, 0x09, 0xe4, 0xb1, 0x23, 0x85, 0xf7, 0xd0, 0xbd, 0x4e, + 0x93, 0x77, 0xbf, 0xcf, 0x35, 0x94, 0xe1, 0x5d, 0xb8, 0xdd, 0x3a, 0xd2, 0x0c, 0xfd, 0xf8, 0xf0, + 0xa8, 0x75, 0xa0, 0xa1, 0xec, 0xed, 0x52, 0xf1, 0xdf, 0x4b, 0xe8, 0xf5, 0xeb, 0xd7, 0xaf, 0x33, + 0x4f, 0x72, 0xc5, 0xf7, 0xd1, 0xcd, 0xda, 0x5f, 0x33, 0x50, 0x9d, 0x9c, 0x7f, 0xf1, 0x8f, 0xe0, + 0x62, 0xfc, 0xb1, 0x1a, 0xd0, 0xd0, 0x78, 0x69, 0xfb, 0x22, 0x1b, 0x87, 0x44, 0x4e, 0x90, 0x49, + 0x20, 0xd7, 0x23, 0x56, 0x8f, 0x86, 0x3f, 0xb3, 0x7d, 0x9e, 0x6b, 0x43, 0x12, 0xe2, 0x36, 0x5c, + 0x65, 0xae, 0x11, 0x84, 0x84, 0x59, 0xc4, 0xb7, 0x8c, 0xf1, 0x35, 0x81, 0x41, 0x4c, 0x93, 0x06, + 0x81, 0x2b, 0x1b, 0x41, 0x62, 0xe5, 0x1d, 0xe6, 0xf6, 0x22, 0xf2, 0xb8, 0x42, 0x36, 0x22, 0xea, + 0xd4, 0xa1, 0x67, 0x17, 0x1d, 0xfa, 0x65, 0x28, 0x0d, 0x89, 0x67, 0x50, 0x16, 0xfa, 0x67, 0x62, + 0x6a, 0x2b, 0xea, 0xc5, 0x21, 0xf1, 0x34, 0xbe, 0xfe, 0xf6, 0x4e, 0x22, 0x15, 0xcd, 0xda, 0x3f, + 0xb2, 
0x50, 0x49, 0x4f, 0x6e, 0x7c, 0x10, 0x36, 0x45, 0x95, 0x56, 0xc4, 0x4b, 0xfc, 0xde, 0xd7, + 0xce, 0x79, 0xf5, 0x3d, 0x5e, 0xbe, 0x77, 0x0a, 0x72, 0x9e, 0xd2, 0xa5, 0x26, 0x6f, 0x9d, 0xfc, + 0xb5, 0xa5, 0x72, 0x4a, 0x2f, 0xea, 0xd1, 0x0a, 0xef, 0x43, 0xe1, 0x59, 0x20, 0x6c, 0x17, 0x84, + 0xed, 0xeb, 0x5f, 0x6f, 0xfb, 0x49, 0x4f, 0x18, 0x2f, 0x3d, 0xe9, 0x19, 0x87, 0x1d, 0xfd, 0xa0, + 0xd1, 0xd6, 0x23, 0x75, 0x7c, 0x09, 0x72, 0x0e, 0xf9, 0xe2, 0x6c, 0xb2, 0xd0, 0x0b, 0xe8, 0xbc, + 0x81, 0xbf, 0x04, 0xb9, 0x97, 0x94, 0x3c, 0x9f, 0x2c, 0xaf, 0x02, 0xfa, 0x16, 0x5f, 0x80, 0xbb, + 0x90, 0x17, 0xf1, 0xc2, 0x00, 0x51, 0xc4, 0xd0, 0x5b, 0xb8, 0x08, 0xb9, 0xbd, 0x8e, 0xce, 0x5f, + 0x02, 0x04, 0x15, 0x89, 0x1a, 0xdd, 0x96, 0xb6, 0xa7, 0xa1, 0x4c, 0xed, 0x01, 0x14, 0x64, 0x10, + 0xf8, 0x0b, 0x92, 0x84, 0x01, 0xbd, 0x15, 0x2d, 0x23, 0x1b, 0x4a, 0x2c, 0x3d, 0x3e, 0xd8, 0xd5, + 0x74, 0x94, 0x49, 0x1f, 0x6f, 0x00, 0x95, 0xf4, 0xd0, 0xf6, 0xdd, 0xe4, 0xd4, 0x5f, 0x14, 0x28, + 0xa7, 0x86, 0x30, 0xde, 0xfe, 0x89, 0xe3, 0xb8, 0x2f, 0x0d, 0xe2, 0xd8, 0x24, 0x88, 0x92, 0x02, + 0x04, 0xd4, 0xe0, 0xc8, 0x79, 0x0f, 0xed, 0x3b, 0x71, 0xfe, 0x0f, 0x0a, 0xa0, 0xe9, 0x01, 0x6e, + 0xca, 0x41, 0xe5, 0x7b, 0x75, 0xf0, 0xf7, 0x0a, 0x54, 0x27, 0xa7, 0xb6, 0x29, 0xf7, 0xae, 0x7d, + 0xaf, 0xee, 0xfd, 0x4e, 0x81, 0xe5, 0x89, 0x59, 0xed, 0xff, 0xca, 0xbb, 0xdf, 0x66, 0x61, 0x6d, + 0x8e, 0x1e, 0x6e, 0x44, 0x43, 0xad, 0x9c, 0xb3, 0x7f, 0x78, 0x9e, 0x67, 0xd5, 0x79, 0xcf, 0xec, + 0x12, 0x3f, 0x8c, 0x66, 0xe0, 0x5b, 0x80, 0x6c, 0x8b, 0xb2, 0xd0, 0xee, 0xdb, 0xd4, 0x8f, 0x3e, + 0xc4, 0xe5, 0xa4, 0xbb, 0x32, 0xc6, 0xe5, 0xb7, 0xf8, 0x0f, 0x00, 0x7b, 0x6e, 0x60, 0x87, 0xf6, + 0x0b, 0x6a, 0xd8, 0x2c, 0xfe, 0x6a, 0xe7, 0x93, 0x6f, 0x4e, 0x47, 0xb1, 0xa4, 0xc5, 0xc2, 0x84, + 0xcd, 0xe8, 0x80, 0x4c, 0xb1, 0x79, 0xed, 0xcb, 0xea, 0x28, 0x96, 0x24, 0xec, 0x6b, 0x50, 0xb1, + 0xdc, 0x11, 0x1f, 0x22, 0x24, 0x8f, 0x97, 0x5a, 0x45, 0x2f, 0x4b, 0x2c, 0xa1, 0x44, 0x53, 0xde, + 0xf8, 0xba, 0xa0, 0xa2, 0x97, 0x25, 0x26, 0x29, 0x37, 0x61, 0x85, 0x0c, 0x06, 0x3e, 0x37, 0x1e, + 0x1b, 0x92, 0xa3, 0x6b, 0x35, 0x81, 0x05, 0x71, 0xe3, 0x09, 0x14, 0xe3, 0x38, 0xf0, 0x6e, 0xc6, + 0x23, 0x61, 0x78, 0xf2, 0xd2, 0x26, 0xb3, 0x55, 0xd2, 0x8b, 0x2c, 0x16, 0x5e, 0x83, 0x8a, 0x1d, + 0x18, 0xe3, 0xdb, 0xc3, 0xcc, 0x66, 0x66, 0xab, 0xa8, 0x97, 0xed, 0x20, 0xb9, 0x2e, 0xaa, 0x7d, + 0x95, 0x81, 0xea, 0xe4, 0xed, 0x27, 0x6e, 0x42, 0xd1, 0x71, 0x4d, 0x22, 0x12, 0x41, 0x5e, 0xbd, + 0x6f, 0xbd, 0xe1, 0xc2, 0xb4, 0xde, 0x8e, 0xf8, 0x7a, 0xa2, 0xb9, 0xf1, 0x37, 0x05, 0x8a, 0x31, + 0x8c, 0x2f, 0x40, 0xce, 0x23, 0xe1, 0xa9, 0x30, 0x97, 0xdf, 0xcd, 0x20, 0x45, 0x17, 0x6b, 0x8e, + 0x07, 0x1e, 0x61, 0x22, 0x05, 0x22, 0x9c, 0xaf, 0xf9, 0xb9, 0x3a, 0x94, 0x58, 0x62, 0x28, 0x76, + 0x87, 0x43, 0xca, 0xc2, 0x20, 0x3e, 0xd7, 0x08, 0xdf, 0x8b, 0x60, 0x7c, 0x07, 0x56, 0x43, 0x9f, + 0xd8, 0xce, 0x04, 0x37, 0x27, 0xb8, 0x28, 0x16, 0x24, 0xe4, 0x1d, 0xb8, 0x14, 0xdb, 0xb5, 0x68, + 0x48, 0xcc, 0x53, 0x6a, 0x8d, 0x95, 0x0a, 0xe2, 0x6a, 0xed, 0x62, 0x44, 0x68, 0x46, 0xf2, 0x58, + 0xb7, 0xf6, 0x77, 0x05, 0x56, 0xe3, 0x31, 0xde, 0x4a, 0x82, 0x75, 0x00, 0x40, 0x18, 0x73, 0xc3, + 0x74, 0xb8, 0x66, 0x53, 0x79, 0x46, 0xaf, 0xde, 0x48, 0x94, 0xf4, 0x94, 0x81, 0x8d, 0x21, 0xc0, + 0x58, 0xb2, 0x30, 0x6c, 0x57, 0xa1, 0x1c, 0x5d, 0x6d, 0x8b, 0xff, 0x8f, 0xc8, 0x6f, 0x3f, 0x90, + 0x10, 0x9f, 0xf7, 0xf1, 0x3a, 0xe4, 0x4f, 0xe8, 0xc0, 0x66, 0xd1, 0x85, 0x9b, 0x5c, 0xc4, 0xd7, + 0x78, 0xb9, 0xe4, 0x1a, 0x6f, 0xf7, 0xe7, 0xb0, 0x66, 0xba, 0xc3, 0x69, 0x77, 0x77, 0xd1, 0xd4, + 0xf7, 0x67, 0xf0, 0xa9, 0xf2, 
0x39, 0x8c, 0xa7, 0xb3, 0x3f, 0x2a, 0xca, 0x97, 0x99, 0xec, 0x7e, + 0x77, 0xf7, 0x4f, 0x99, 0x8d, 0x7d, 0xa9, 0xda, 0x8d, 0x77, 0xaa, 0xd3, 0xbe, 0x43, 0x4d, 0xee, + 0xfd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x54, 0xc8, 0x7d, 0x07, 0x1a, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.golden deleted file mode 100644 index 302c5686bca..00000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.golden +++ /dev/null @@ -1,1024 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google/protobuf/descriptor.proto -// DO NOT EDIT! - -package google_protobuf - -import proto "github.com/golang/protobuf/proto" -import "math" - -// Reference proto and math imports to suppress error if they are not otherwise used. -var _ = proto.GetString -var _ = math.Inf - -type FieldDescriptorProto_Type int32 - -const ( - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 - FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18, -} - -// NewFieldDescriptorProto_Type is deprecated. Use x.Enum() instead. 
-func NewFieldDescriptorProto_Type(x FieldDescriptorProto_Type) *FieldDescriptorProto_Type { - e := FieldDescriptorProto_Type(x) - return &e -} -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} - -type FieldDescriptorProto_Label int32 - -const ( - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -// NewFieldDescriptorProto_Label is deprecated. Use x.Enum() instead. -func NewFieldDescriptorProto_Label(x FieldDescriptorProto_Label) *FieldDescriptorProto_Label { - e := FieldDescriptorProto_Label(x) - return &e -} -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} - -type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -// NewFileOptions_OptimizeMode is deprecated. Use x.Enum() instead. -func NewFileOptions_OptimizeMode(x FileOptions_OptimizeMode) *FileOptions_OptimizeMode { - e := FileOptions_OptimizeMode(x) - return &e -} -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} - -type FieldOptions_CType int32 - -const ( - FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -// NewFieldOptions_CType is deprecated. Use x.Enum() instead. -func NewFieldOptions_CType(x FieldOptions_CType) *FieldOptions_CType { - e := FieldOptions_CType(x) - return &e -} -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} - -type StreamOptions_TokenUnit int32 - -const ( - StreamOptions_MESSAGE StreamOptions_TokenUnit = 0 - StreamOptions_BYTE StreamOptions_TokenUnit = 1 -) - -var StreamOptions_TokenUnit_name = map[int32]string{ - 0: "MESSAGE", - 1: "BYTE", -} -var StreamOptions_TokenUnit_value = map[string]int32{ - "MESSAGE": 0, - "BYTE": 1, -} - -// NewStreamOptions_TokenUnit is deprecated. Use x.Enum() instead. 
-func NewStreamOptions_TokenUnit(x StreamOptions_TokenUnit) *StreamOptions_TokenUnit { - e := StreamOptions_TokenUnit(x) - return &e -} -func (x StreamOptions_TokenUnit) Enum() *StreamOptions_TokenUnit { - p := new(StreamOptions_TokenUnit) - *p = x - return p -} -func (x StreamOptions_TokenUnit) String() string { - return proto.EnumName(StreamOptions_TokenUnit_name, int32(x)) -} - -type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *FileDescriptorSet) Reset() { *this = FileDescriptorSet{} } -func (this *FileDescriptorSet) String() string { return proto.CompactTextString(this) } -func (*FileDescriptorSet) ProtoMessage() {} - -type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency" json:"public_dependency,omitempty"` - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency" json:"weak_dependency,omitempty"` - MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info" json:"source_code_info,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *FileDescriptorProto) Reset() { *this = FileDescriptorProto{} } -func (this *FileDescriptorProto) String() string { return proto.CompactTextString(this) } -func (*FileDescriptorProto) ProtoMessage() {} - -func (this *FileDescriptorProto) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *FileDescriptorProto) GetPackage() string { - if this != nil && this.Package != nil { - return *this.Package - } - return "" -} - -func (this *FileDescriptorProto) GetOptions() *FileOptions { - if this != nil { - return this.Options - } - return nil -} - -func (this *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if this != nil { - return this.SourceCodeInfo - } - return nil -} - -type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range" json:"extension_range,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *DescriptorProto) Reset() { *this = DescriptorProto{} } -func (this *DescriptorProto) String() string { return 
proto.CompactTextString(this) } -func (*DescriptorProto) ProtoMessage() {} - -func (this *DescriptorProto) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *DescriptorProto) GetOptions() *MessageOptions { - if this != nil { - return this.Options - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *DescriptorProto_ExtensionRange) Reset() { *this = DescriptorProto_ExtensionRange{} } -func (this *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(this) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} - -func (this *DescriptorProto_ExtensionRange) GetStart() int32 { - if this != nil && this.Start != nil { - return *this.Start - } - return 0 -} - -func (this *DescriptorProto_ExtensionRange) GetEnd() int32 { - if this != nil && this.End != nil { - return *this.End - } - return 0 -} - -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=proto2.FieldDescriptorProto_Label" json:"label,omitempty"` - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=proto2.FieldDescriptorProto_Type" json:"type,omitempty"` - TypeName *string `protobuf:"bytes,6,opt,name=type_name" json:"type_name,omitempty"` - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value" json:"default_value,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *FieldDescriptorProto) Reset() { *this = FieldDescriptorProto{} } -func (this *FieldDescriptorProto) String() string { return proto.CompactTextString(this) } -func (*FieldDescriptorProto) ProtoMessage() {} - -func (this *FieldDescriptorProto) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *FieldDescriptorProto) GetNumber() int32 { - if this != nil && this.Number != nil { - return *this.Number - } - return 0 -} - -func (this *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if this != nil && this.Label != nil { - return *this.Label - } - return 0 -} - -func (this *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if this != nil && this.Type != nil { - return *this.Type - } - return 0 -} - -func (this *FieldDescriptorProto) GetTypeName() string { - if this != nil && this.TypeName != nil { - return *this.TypeName - } - return "" -} - -func (this *FieldDescriptorProto) GetExtendee() string { - if this != nil && this.Extendee != nil { - return *this.Extendee - } - return "" -} - -func (this *FieldDescriptorProto) GetDefaultValue() string { - if this != nil && this.DefaultValue != nil { - return *this.DefaultValue - } - return "" -} - -func (this *FieldDescriptorProto) GetOptions() *FieldOptions { - if this != nil { - return this.Options - } - return nil -} - -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options 
*EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *EnumDescriptorProto) Reset() { *this = EnumDescriptorProto{} } -func (this *EnumDescriptorProto) String() string { return proto.CompactTextString(this) } -func (*EnumDescriptorProto) ProtoMessage() {} - -func (this *EnumDescriptorProto) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *EnumDescriptorProto) GetOptions() *EnumOptions { - if this != nil { - return this.Options - } - return nil -} - -type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *EnumValueDescriptorProto) Reset() { *this = EnumValueDescriptorProto{} } -func (this *EnumValueDescriptorProto) String() string { return proto.CompactTextString(this) } -func (*EnumValueDescriptorProto) ProtoMessage() {} - -func (this *EnumValueDescriptorProto) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *EnumValueDescriptorProto) GetNumber() int32 { - if this != nil && this.Number != nil { - return *this.Number - } - return 0 -} - -func (this *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if this != nil { - return this.Options - } - return nil -} - -type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Stream []*StreamDescriptorProto `protobuf:"bytes,4,rep,name=stream" json:"stream,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *ServiceDescriptorProto) Reset() { *this = ServiceDescriptorProto{} } -func (this *ServiceDescriptorProto) String() string { return proto.CompactTextString(this) } -func (*ServiceDescriptorProto) ProtoMessage() {} - -func (this *ServiceDescriptorProto) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if this != nil { - return this.Options - } - return nil -} - -type MethodDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - InputType *string `protobuf:"bytes,2,opt,name=input_type" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *MethodDescriptorProto) Reset() { *this = MethodDescriptorProto{} } -func (this *MethodDescriptorProto) String() string { return proto.CompactTextString(this) } -func (*MethodDescriptorProto) ProtoMessage() {} - -func (this *MethodDescriptorProto) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *MethodDescriptorProto) GetInputType() string { - if this != nil && this.InputType != nil { - return *this.InputType - } - return "" -} - -func (this *MethodDescriptorProto) GetOutputType() string { - if this != nil && this.OutputType != nil { - return *this.OutputType - 
} - return "" -} - -func (this *MethodDescriptorProto) GetOptions() *MethodOptions { - if this != nil { - return this.Options - } - return nil -} - -type StreamDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - ClientMessageType *string `protobuf:"bytes,2,opt,name=client_message_type" json:"client_message_type,omitempty"` - ServerMessageType *string `protobuf:"bytes,3,opt,name=server_message_type" json:"server_message_type,omitempty"` - Options *StreamOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *StreamDescriptorProto) Reset() { *this = StreamDescriptorProto{} } -func (this *StreamDescriptorProto) String() string { return proto.CompactTextString(this) } -func (*StreamDescriptorProto) ProtoMessage() {} - -func (this *StreamDescriptorProto) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *StreamDescriptorProto) GetClientMessageType() string { - if this != nil && this.ClientMessageType != nil { - return *this.ClientMessageType - } - return "" -} - -func (this *StreamDescriptorProto) GetServerMessageType() string { - if this != nil && this.ServerMessageType != nil { - return *this.ServerMessageType - } - return "" -} - -func (this *StreamDescriptorProto) GetOptions() *StreamOptions { - if this != nil { - return this.Options - } - return nil -} - -type FileOptions struct { - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package" json:"java_package,omitempty"` - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname" json:"java_outer_classname,omitempty"` - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,def=0" json:"java_multiple_files,omitempty"` - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,def=0" json:"java_generate_equals_and_hash,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,enum=proto2.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - GoPackage *string `protobuf:"bytes,11,opt,name=go_package" json:"go_package,omitempty"` - CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,def=0" json:"py_generic_services,omitempty"` - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *FileOptions) Reset() { *this = FileOptions{} } -func (this *FileOptions) String() string { return proto.CompactTextString(this) } -func (*FileOptions) ProtoMessage() {} - -var extRange_FileOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FileOptions -} -func (this *FileOptions) ExtensionMap() map[int32]proto.Extension { - if this.XXX_extensions == nil { - this.XXX_extensions = make(map[int32]proto.Extension) - } - return this.XXX_extensions -} - -const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaGenerateEqualsAndHash bool = false -const Default_FileOptions_OptimizeFor 
FileOptions_OptimizeMode = FileOptions_SPEED -const Default_FileOptions_CcGenericServices bool = false -const Default_FileOptions_JavaGenericServices bool = false -const Default_FileOptions_PyGenericServices bool = false - -func (this *FileOptions) GetJavaPackage() string { - if this != nil && this.JavaPackage != nil { - return *this.JavaPackage - } - return "" -} - -func (this *FileOptions) GetJavaOuterClassname() string { - if this != nil && this.JavaOuterClassname != nil { - return *this.JavaOuterClassname - } - return "" -} - -func (this *FileOptions) GetJavaMultipleFiles() bool { - if this != nil && this.JavaMultipleFiles != nil { - return *this.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -func (this *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if this != nil && this.JavaGenerateEqualsAndHash != nil { - return *this.JavaGenerateEqualsAndHash - } - return Default_FileOptions_JavaGenerateEqualsAndHash -} - -func (this *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if this != nil && this.OptimizeFor != nil { - return *this.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (this *FileOptions) GetGoPackage() string { - if this != nil && this.GoPackage != nil { - return *this.GoPackage - } - return "" -} - -func (this *FileOptions) GetCcGenericServices() bool { - if this != nil && this.CcGenericServices != nil { - return *this.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (this *FileOptions) GetJavaGenericServices() bool { - if this != nil && this.JavaGenericServices != nil { - return *this.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (this *FileOptions) GetPyGenericServices() bool { - if this != nil && this.PyGenericServices != nil { - return *this.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -type MessageOptions struct { - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,def=0" json:"message_set_wire_format,omitempty"` - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *MessageOptions) Reset() { *this = MessageOptions{} } -func (this *MessageOptions) String() string { return proto.CompactTextString(this) } -func (*MessageOptions) ProtoMessage() {} - -var extRange_MessageOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MessageOptions -} -func (this *MessageOptions) ExtensionMap() map[int32]proto.Extension { - if this.XXX_extensions == nil { - this.XXX_extensions = make(map[int32]proto.Extension) - } - return this.XXX_extensions -} - -const Default_MessageOptions_MessageSetWireFormat bool = false -const Default_MessageOptions_NoStandardDescriptorAccessor bool = false - -func (this *MessageOptions) GetMessageSetWireFormat() bool { - if this != nil && this.MessageSetWireFormat != nil { - return *this.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (this *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if this != nil && this.NoStandardDescriptorAccessor != nil { - return 
*this.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -type FieldOptions struct { - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=proto2.FieldOptions_CType,def=0" json:"ctype,omitempty"` - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - ExperimentalMapKey *string `protobuf:"bytes,9,opt,name=experimental_map_key" json:"experimental_map_key,omitempty"` - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *FieldOptions) Reset() { *this = FieldOptions{} } -func (this *FieldOptions) String() string { return proto.CompactTextString(this) } -func (*FieldOptions) ProtoMessage() {} - -var extRange_FieldOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FieldOptions -} -func (this *FieldOptions) ExtensionMap() map[int32]proto.Extension { - if this.XXX_extensions == nil { - this.XXX_extensions = make(map[int32]proto.Extension) - } - return this.XXX_extensions -} - -const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING -const Default_FieldOptions_Lazy bool = false -const Default_FieldOptions_Deprecated bool = false -const Default_FieldOptions_Weak bool = false - -func (this *FieldOptions) GetCtype() FieldOptions_CType { - if this != nil && this.Ctype != nil { - return *this.Ctype - } - return Default_FieldOptions_Ctype -} - -func (this *FieldOptions) GetPacked() bool { - if this != nil && this.Packed != nil { - return *this.Packed - } - return false -} - -func (this *FieldOptions) GetLazy() bool { - if this != nil && this.Lazy != nil { - return *this.Lazy - } - return Default_FieldOptions_Lazy -} - -func (this *FieldOptions) GetDeprecated() bool { - if this != nil && this.Deprecated != nil { - return *this.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (this *FieldOptions) GetExperimentalMapKey() string { - if this != nil && this.ExperimentalMapKey != nil { - return *this.ExperimentalMapKey - } - return "" -} - -func (this *FieldOptions) GetWeak() bool { - if this != nil && this.Weak != nil { - return *this.Weak - } - return Default_FieldOptions_Weak -} - -type EnumOptions struct { - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,def=1" json:"allow_alias,omitempty"` - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *EnumOptions) Reset() { *this = EnumOptions{} } -func (this *EnumOptions) String() string { return proto.CompactTextString(this) } -func (*EnumOptions) ProtoMessage() {} - -var extRange_EnumOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumOptions -} -func (this *EnumOptions) ExtensionMap() map[int32]proto.Extension { - if this.XXX_extensions == nil { - this.XXX_extensions = make(map[int32]proto.Extension) - } - return 
this.XXX_extensions -} - -const Default_EnumOptions_AllowAlias bool = true - -func (this *EnumOptions) GetAllowAlias() bool { - if this != nil && this.AllowAlias != nil { - return *this.AllowAlias - } - return Default_EnumOptions_AllowAlias -} - -type EnumValueOptions struct { - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *EnumValueOptions) Reset() { *this = EnumValueOptions{} } -func (this *EnumValueOptions) String() string { return proto.CompactTextString(this) } -func (*EnumValueOptions) ProtoMessage() {} - -var extRange_EnumValueOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumValueOptions -} -func (this *EnumValueOptions) ExtensionMap() map[int32]proto.Extension { - if this.XXX_extensions == nil { - this.XXX_extensions = make(map[int32]proto.Extension) - } - return this.XXX_extensions -} - -type ServiceOptions struct { - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *ServiceOptions) Reset() { *this = ServiceOptions{} } -func (this *ServiceOptions) String() string { return proto.CompactTextString(this) } -func (*ServiceOptions) ProtoMessage() {} - -var extRange_ServiceOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ServiceOptions -} -func (this *ServiceOptions) ExtensionMap() map[int32]proto.Extension { - if this.XXX_extensions == nil { - this.XXX_extensions = make(map[int32]proto.Extension) - } - return this.XXX_extensions -} - -type MethodOptions struct { - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *MethodOptions) Reset() { *this = MethodOptions{} } -func (this *MethodOptions) String() string { return proto.CompactTextString(this) } -func (*MethodOptions) ProtoMessage() {} - -var extRange_MethodOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MethodOptions -} -func (this *MethodOptions) ExtensionMap() map[int32]proto.Extension { - if this.XXX_extensions == nil { - this.XXX_extensions = make(map[int32]proto.Extension) - } - return this.XXX_extensions -} - -type StreamOptions struct { - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *StreamOptions) Reset() { *this = StreamOptions{} } -func (this *StreamOptions) String() string { return proto.CompactTextString(this) } -func (*StreamOptions) ProtoMessage() {} - -var extRange_StreamOptions = []proto.ExtensionRange{ - {1000, 536870911}, -} - -func (*StreamOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_StreamOptions -} -func (this *StreamOptions) ExtensionMap() map[int32]proto.Extension { - if this.XXX_extensions == nil { - 
this.XXX_extensions = make(map[int32]proto.Extension) - } - return this.XXX_extensions -} - -type UninterpretedOption struct { - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value" json:"aggregate_value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *UninterpretedOption) Reset() { *this = UninterpretedOption{} } -func (this *UninterpretedOption) String() string { return proto.CompactTextString(this) } -func (*UninterpretedOption) ProtoMessage() {} - -func (this *UninterpretedOption) GetIdentifierValue() string { - if this != nil && this.IdentifierValue != nil { - return *this.IdentifierValue - } - return "" -} - -func (this *UninterpretedOption) GetPositiveIntValue() uint64 { - if this != nil && this.PositiveIntValue != nil { - return *this.PositiveIntValue - } - return 0 -} - -func (this *UninterpretedOption) GetNegativeIntValue() int64 { - if this != nil && this.NegativeIntValue != nil { - return *this.NegativeIntValue - } - return 0 -} - -func (this *UninterpretedOption) GetDoubleValue() float64 { - if this != nil && this.DoubleValue != nil { - return *this.DoubleValue - } - return 0 -} - -func (this *UninterpretedOption) GetStringValue() []byte { - if this != nil { - return this.StringValue - } - return nil -} - -func (this *UninterpretedOption) GetAggregateValue() string { - if this != nil && this.AggregateValue != nil { - return *this.AggregateValue - } - return "" -} - -type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension" json:"is_extension,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *UninterpretedOption_NamePart) Reset() { *this = UninterpretedOption_NamePart{} } -func (this *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(this) } -func (*UninterpretedOption_NamePart) ProtoMessage() {} - -func (this *UninterpretedOption_NamePart) GetNamePart() string { - if this != nil && this.NamePart != nil { - return *this.NamePart - } - return "" -} - -func (this *UninterpretedOption_NamePart) GetIsExtension() bool { - if this != nil && this.IsExtension != nil { - return *this.IsExtension - } - return false -} - -type SourceCodeInfo struct { - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *SourceCodeInfo) Reset() { *this = SourceCodeInfo{} } -func (this *SourceCodeInfo) String() string { return proto.CompactTextString(this) } -func (*SourceCodeInfo) ProtoMessage() {} - -type SourceCodeInfo_Location struct { - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *SourceCodeInfo_Location) Reset() { 
*this = SourceCodeInfo_Location{} } -func (this *SourceCodeInfo_Location) String() string { return proto.CompactTextString(this) } -func (*SourceCodeInfo_Location) ProtoMessage() {} - -func init() { - proto.RegisterEnum("google_protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - proto.RegisterEnum("google_protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google_protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google_protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google_protobuf.StreamOptions_TokenUnit", StreamOptions_TokenUnit_name, StreamOptions_TokenUnit_value) -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go index b47492a76b3..4d7d19b677a 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -39,6 +39,7 @@ package generator import ( "bufio" "bytes" + "compress/gzip" "fmt" "go/parser" "go/printer" @@ -57,6 +58,12 @@ import ( plugin "github.com/golang/protobuf/protoc-gen-go/plugin" ) +// generatedCodeVersion indicates a version of the generated code. +// It is incremented whenever an incompatibility between the generated code and +// proto package is introduced; the generated code references +// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 2 + // A Plugin provides functionality to add to the output during Go code generation, // such as to produce RPC stubs. type Plugin interface { @@ -257,6 +264,36 @@ type FileDescriptor struct { // PackageName is the package name we'll use in the generated code to refer to this file. func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) } +// VarName is the variable name we'll use in the generated code to refer +// to the compressed bytes of this descriptor. It is not exported, so +// it is only valid inside the generated package. +func (d *FileDescriptor) VarName() string { return fmt.Sprintf("fileDescriptor%d", d.index) } + +// goPackageOption interprets the file's go_package option. +// If there is no go_package, it returns ("", "", false). +// If there's a simple name, it returns ("", pkg, true). +// If the option implies an import path, it returns (impPath, pkg, true). +func (d *FileDescriptor) goPackageOption() (impPath, pkg string, ok bool) { + pkg = d.GetOptions().GetGoPackage() + if pkg == "" { + return + } + ok = true + // The presence of a slash implies there's an import path. + slash := strings.LastIndex(pkg, "/") + if slash < 0 { + return + } + impPath, pkg = pkg, pkg[slash+1:] + // A semicolon-delimited suffix overrides the package name. + sc := strings.IndexByte(impPath, ';') + if sc < 0 { + return + } + impPath, pkg = impPath[:sc], impPath[sc+1:] + return +} + // goPackageName returns the Go package name to use in the // generated Go file. The result explicit reports whether the name // came from an option go_package statement. If explicit is false, @@ -264,10 +301,8 @@ func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDes // or the input file name. 
func (d *FileDescriptor) goPackageName() (name string, explicit bool) { // Does the file have a "go_package" option? - if opts := d.Options; opts != nil { - if pkg := opts.GetGoPackage(); pkg != "" { - return pkg, true - } + if _, pkg, ok := d.goPackageOption(); ok { + return pkg, true } // Does the file have a package clause? @@ -278,6 +313,26 @@ func (d *FileDescriptor) goPackageName() (name string, explicit bool) { return baseName(d.GetName()), false } +// goFileName returns the output name for the generated Go file. +func (d *FileDescriptor) goFileName() string { + name := *d.Name + if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { + name = name[:len(name)-len(ext)] + } + name += ".pb.go" + + // Does the file have a "go_package" option? + // If it does, it may override the filename. + if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { + // Replace the existing dirname with the declared import path. + _, name = path.Split(name) + name = path.Join(impPath, name) + return name + } + + return name +} + func (d *FileDescriptor) addExport(obj Object, sym symbol) { d.exported[obj] = append(d.exported[obj], sym) } @@ -313,8 +368,6 @@ func (ms *messageSymbol) GenerateAlias(g *Generator, pkg string) { if ms.hasExtensions { g.P("func (*", ms.sym, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange ", "{ return (*", remoteSym, ")(nil).ExtensionRangeArray() }") - g.P("func (m *", ms.sym, ") ExtensionMap() map[int32]", g.Pkg["proto"], ".Extension ", - "{ return (*", remoteSym, ")(m).ExtensionMap() }") if ms.isMessageSet { g.P("func (m *", ms.sym, ") Marshal() ([]byte, error) ", "{ return (*", remoteSym, ")(m).Marshal() }") @@ -328,25 +381,34 @@ func (ms *messageSymbol) GenerateAlias(g *Generator, pkg string) { // but they're going to break weirdly for text/JSON. enc := "_" + ms.sym + "_OneofMarshaler" dec := "_" + ms.sym + "_OneofUnmarshaler" + size := "_" + ms.sym + "_OneofSizer" encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" - g.P("func (m *", ms.sym, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", []interface{}) {") - g.P("return ", enc, ", ", dec, ", nil") + sizeSig := "(msg " + g.Pkg["proto"] + ".Message) int" + g.P("func (m *", ms.sym, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {") + g.P("return ", enc, ", ", dec, ", ", size, ", nil") g.P("}") g.P("func ", enc, encSig, " {") g.P("m := msg.(*", ms.sym, ")") g.P("m0 := (*", remoteSym, ")(m)") - g.P("enc, _, _ := m0.XXX_OneofFuncs()") + g.P("enc, _, _, _ := m0.XXX_OneofFuncs()") g.P("return enc(m0, b)") g.P("}") g.P("func ", dec, decSig, " {") g.P("m := msg.(*", ms.sym, ")") g.P("m0 := (*", remoteSym, ")(m)") - g.P("_, dec, _ := m0.XXX_OneofFuncs()") + g.P("_, dec, _, _ := m0.XXX_OneofFuncs()") g.P("return dec(m0, tag, wire, b)") g.P("}") + + g.P("func ", size, sizeSig, " {") + g.P("m := msg.(*", ms.sym, ")") + g.P("m0 := (*", remoteSym, ")(m)") + g.P("_, _, size, _ := m0.XXX_OneofFuncs()") + g.P("return size(m0)") + g.P("}") } for _, get := range ms.getters { @@ -496,18 +558,20 @@ type Generator struct { Param map[string]string // Command-line parameters. PackageImportPath string // Go import path of the package we're generating code for ImportPrefix string // String to prefix to imported package file names. 
- ImportMap map[string]string // Mapping from import name to generated name + ImportMap map[string]string // Mapping from .proto file name to import path Pkg map[string]string // The names under which we import support packages - packageName string // What we're calling ourselves. - allFiles []*FileDescriptor // All files in the tree - genFiles []*FileDescriptor // Those files we will generate output for. - file *FileDescriptor // The file we are compiling now. - usedPackages map[string]bool // Names of packages used in current file. - typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. - init []string // Lines to emit in the init function. + packageName string // What we're calling ourselves. + allFiles []*FileDescriptor // All files in the tree + allFilesByName map[string]*FileDescriptor // All files by filename. + genFiles []*FileDescriptor // Those files we will generate output for. + file *FileDescriptor // The file we are compiling now. + usedPackages map[string]bool // Names of packages used in current file. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + init []string // Lines to emit in the init function. indent string + writeOutput bool } // New creates a new generator and allocates the request and response protobufs. @@ -742,42 +806,40 @@ AllFiles: // and FileDescriptorProtos into file-referenced objects within the Generator. // It also creates the list of files to generate and so should be called before GenerateAllFiles. func (g *Generator) WrapTypes() { - g.allFiles = make([]*FileDescriptor, len(g.Request.ProtoFile)) - for i, f := range g.Request.ProtoFile { + g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) + g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) + for _, f := range g.Request.ProtoFile { // We must wrap the descriptors before we wrap the enums descs := wrapDescriptors(f) g.buildNestedDescriptors(descs) enums := wrapEnumDescriptors(f, descs) g.buildNestedEnums(descs, enums) exts := wrapExtensions(f) - imps := wrapImported(f, g) fd := &FileDescriptor{ FileDescriptorProto: f, desc: descs, enum: enums, ext: exts, - imp: imps, exported: make(map[Object][]symbol), proto3: fileIsProto3(f), } extractComments(fd) - g.allFiles[i] = fd - } - - g.genFiles = make([]*FileDescriptor, len(g.Request.FileToGenerate)) -FindFiles: - for i, fileName := range g.Request.FileToGenerate { - // Search the list. This algorithm is n^2 but n is tiny. - for _, file := range g.allFiles { - if fileName == file.GetName() { - g.genFiles[i] = file - file.index = i - continue FindFiles - } + g.allFiles = append(g.allFiles, fd) + g.allFilesByName[f.GetName()] = fd + } + for _, fd := range g.allFiles { + fd.imp = wrapImported(fd.FileDescriptorProto, g) + } + + g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) + for _, fileName := range g.Request.FileToGenerate { + fd := g.allFilesByName[fileName] + if fd == nil { + g.Fail("could not find file named", fileName) } - g.Fail("could not find file named", fileName) + fd.index = len(g.genFiles) + g.genFiles = append(g.genFiles, fd) } - g.Response.File = make([]*plugin.CodeGeneratorResponse_File, len(g.genFiles)) } // Scan the descriptors in this file. 
For each one, build the slice of nested descriptors @@ -841,9 +903,8 @@ func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *d } } - d.ext = make([]*ExtensionDescriptor, len(desc.Extension)) - for i, field := range desc.Extension { - d.ext[i] = &ExtensionDescriptor{common{file}, field, d} + for _, field := range desc.Extension { + d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) } return d @@ -902,9 +963,9 @@ func wrapEnumDescriptors(file *descriptor.FileDescriptorProto, descs []*Descript // Return a slice of all the top-level ExtensionDescriptors defined within this file. func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor { - sl := make([]*ExtensionDescriptor, len(file.Extension)) - for i, field := range file.Extension { - sl[i] = &ExtensionDescriptor{common{file}, field, nil} + var sl []*ExtensionDescriptor + for _, field := range file.Extension { + sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) } return sl } @@ -1013,6 +1074,9 @@ func (g *Generator) ObjectNamed(typeName string) Object { // P prints the arguments to the generated output. It handles strings and int32s, plus // handling indirections because they may be *string, etc. func (g *Generator) P(str ...interface{}) { + if !g.writeOutput { + return + } g.WriteString(g.indent) for _, v := range str { switch s := v.(type) { @@ -1021,19 +1085,19 @@ func (g *Generator) P(str ...interface{}) { case *string: g.WriteString(*s) case bool: - g.WriteString(fmt.Sprintf("%t", s)) + fmt.Fprintf(g, "%t", s) case *bool: - g.WriteString(fmt.Sprintf("%t", *s)) + fmt.Fprintf(g, "%t", *s) case int: - g.WriteString(fmt.Sprintf("%d", s)) + fmt.Fprintf(g, "%d", s) case *int32: - g.WriteString(fmt.Sprintf("%d", *s)) + fmt.Fprintf(g, "%d", *s) case *int64: - g.WriteString(fmt.Sprintf("%d", *s)) + fmt.Fprintf(g, "%d", *s) case float64: - g.WriteString(fmt.Sprintf("%g", s)) + fmt.Fprintf(g, "%g", s) case *float64: - g.WriteString(fmt.Sprintf("%g", *s)) + fmt.Fprintf(g, "%g", *s) default: g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) } @@ -1070,17 +1134,17 @@ func (g *Generator) GenerateAllFiles() { for _, file := range g.genFiles { genFileMap[file] = true } - i := 0 for _, file := range g.allFiles { g.Reset() + g.writeOutput = genFileMap[file] g.generate(file) - if _, ok := genFileMap[file]; !ok { + if !g.writeOutput { continue } - g.Response.File[i] = new(plugin.CodeGeneratorResponse_File) - g.Response.File[i].Name = proto.String(goFileName(*file.Name)) - g.Response.File[i].Content = proto.String(g.String()) - i++ + g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ + Name: proto.String(file.goFileName()), + Content: proto.String(g.String()), + }) } } @@ -1108,6 +1172,16 @@ func (g *Generator) generate(file *FileDescriptor) { g.file = g.FileOf(file.FileDescriptorProto) g.usedPackages = make(map[string]bool) + if g.file.index == 0 { + // For one file in the package, assert version compatibility. 
+ g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the proto package it is being compiled against.") + g.P("// A compilation error at this line likely means your copy of the") + g.P("// proto package needs to be updated.") + g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") + g.P() + } + for _, td := range g.file.imp { g.generateImported(td) } @@ -1129,11 +1203,16 @@ func (g *Generator) generate(file *FileDescriptor) { // Run the plugins before the imports so we know which imports are necessary. g.runPlugins(file) + g.generateFileDescriptor(file) + // Generate header and imports last, though they appear first in the output. rem := g.Buffer g.Buffer = new(bytes.Buffer) g.generateHeader() g.generateImports() + if !g.writeOutput { + return + } g.Write(rem.Bytes()) // Reformat generated code. @@ -1211,6 +1290,9 @@ func (g *Generator) generateHeader() { // It returns an indication of whether any comments were printed. // See descriptor.proto for its format. func (g *Generator) PrintComments(path string) bool { + if !g.writeOutput { + return false + } if loc, ok := g.file.comments[path]; ok { text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") for _, line := range strings.Split(text, "\n") { @@ -1222,12 +1304,7 @@ func (g *Generator) PrintComments(path string) bool { } func (g *Generator) fileByName(filename string) *FileDescriptor { - for _, fd := range g.allFiles { - if fd.GetName() == filename { - return fd - } - } - return nil + return g.allFilesByName[filename] } // weak returns whether the ith import of the current file is a weak import. @@ -1254,7 +1331,7 @@ func (g *Generator) generateImports() { if fd.PackageName() == g.packageName { continue } - filename := goFileName(s) + filename := fd.goFileName() // By default, import path is the dirname of the Go filename. importPath := path.Dir(filename) if substitution, ok := g.ImportMap[s]; ok { @@ -1266,15 +1343,14 @@ func (g *Generator) generateImports() { g.P("// skipping weak import ", fd.PackageName(), " ", strconv.Quote(importPath)) continue } - if _, ok := g.usedPackages[fd.PackageName()]; ok { - g.P("import ", fd.PackageName(), " ", strconv.Quote(importPath)) - } else { - // TODO: Re-enable this when we are more feature-complete. - // For instance, some protos use foreign field extensions, which we don't support. - // Until then, this is just annoying spam. - //log.Printf("protoc-gen-go: discarding unused import from %v: %v", *g.file.Name, s) - g.P("// discarding unused import ", fd.PackageName(), " ", strconv.Quote(importPath)) + // We need to import all the dependencies, even if we don't reference them, + // because other code and tools depend on having the full transitive closure + // of protocol buffer types in the binary. + pname := fd.PackageName() + if _, ok := g.usedPackages[pname]; !ok { + pname = "_" } + g.P("import ", pname, " ", strconv.Quote(importPath)) } g.P() // TODO: may need to worry about uniqueness across plugins @@ -1389,6 +1465,17 @@ func (g *Generator) generateEnum(enum *EnumDescriptor) { g.P("}") } + var indexes []string + for m := enum.parent; m != nil; m = m.parent { + // XXX: skip groups? + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) 
+ } + indexes = append(indexes, strconv.Itoa(enum.index)) + g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") + if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { + g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) + } + g.P() } @@ -1464,7 +1551,11 @@ func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptor enum += CamelCaseSlice(obj.TypeName()) } packed := "" - if field.Options != nil && field.Options.GetPacked() { + if (field.Options != nil && field.Options.GetPacked()) || + // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: + // "In proto3, repeated fields of scalar numeric types use packed encoding by default." + (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && + isRepeated(field) && isScalar(field)) { packed = ",packed" } fieldName := field.GetName() @@ -1479,6 +1570,11 @@ func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptor name = name[i+1:] } } + if json := field.GetJsonName(); json != "" && json != name { + // TODO: escaping might be needed, in which case + // perhaps this should be in its own "json" tag. + name += ",json=" + json + } name = ",name=" + name if message.proto3() { // We only need the extra tag for []byte fields; @@ -1597,7 +1693,8 @@ func (g *Generator) RecordTypeUse(t string) { } // Method names that may be generated. Fields with these names get an -// underscore appended. +// underscore appended. Any change to this set is a potential incompatible +// API change because it changes generated field names. var methodNames = [...]string{ "Reset", "String", @@ -1609,6 +1706,28 @@ var methodNames = [...]string{ "Descriptor", } +// Names of messages in the `google.protobuf` package for which +// we will generate XXX_WellKnownType methods. +var wellKnownTypes = map[string]bool{ + "Any": true, + "Duration": true, + "Empty": true, + "Struct": true, + "Timestamp": true, + + "Value": true, + "ListValue": true, + "DoubleValue": true, + "FloatValue": true, + "Int64Value": true, + "UInt64Value": true, + "Int32Value": true, + "UInt32Value": true, + "BoolValue": true, + "StringValue": true, + "BytesValue": true, +} + // Generate the type and default constant definitions for this Descriptor. func (g *Generator) generateMessage(message *Descriptor) { // The full type name @@ -1760,7 +1879,7 @@ func (g *Generator) generateMessage(message *Descriptor) { g.RecordTypeUse(field.GetTypeName()) } if len(message.ExtensionRange) > 0 { - g.P("XXX_extensions\t\tmap[int32]", g.Pkg["proto"], ".Extension `json:\"-\"`") + g.P(g.Pkg["proto"], ".XXX_InternalExtensions `json:\"-\"`") } if !message.proto3() { g.P("XXX_unrecognized\t[]byte `json:\"-\"`") @@ -1789,6 +1908,16 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P("func (m *", ccTypeName, ") Reset() { *m = ", ccTypeName, "{} }") g.P("func (m *", ccTypeName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") g.P("func (*", ccTypeName, ") ProtoMessage() {}") + var indexes []string + for m := message; m != nil; m = m.parent { + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) 
+ } + g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") + // TODO: Revisit the decision to use a XXX_WellKnownType method + // if we change proto.MessageName to work with multiple equivalents. + if message.file.GetPackage() == "google.protobuf" && wellKnownTypes[message.GetName()] { + g.P("func (*", ccTypeName, `) XXX_WellKnownType() string { return "`, message.GetName(), `" }`) + } // Extension support methods var hasExtensions, isMessageSet bool @@ -1800,22 +1929,22 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P() g.P("func (m *", ccTypeName, ") Marshal() ([]byte, error) {") g.In() - g.P("return ", g.Pkg["proto"], ".MarshalMessageSet(m.ExtensionMap())") + g.P("return ", g.Pkg["proto"], ".MarshalMessageSet(&m.XXX_InternalExtensions)") g.Out() g.P("}") g.P("func (m *", ccTypeName, ") Unmarshal(buf []byte) error {") g.In() - g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSet(buf, m.ExtensionMap())") + g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)") g.Out() g.P("}") g.P("func (m *", ccTypeName, ") MarshalJSON() ([]byte, error) {") g.In() - g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(m.XXX_extensions)") + g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(&m.XXX_InternalExtensions)") g.Out() g.P("}") g.P("func (m *", ccTypeName, ") UnmarshalJSON(buf []byte) error {") g.In() - g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, m.XXX_extensions)") + g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)") g.Out() g.P("}") g.P("// ensure ", ccTypeName, " satisfies proto.Marshaler and proto.Unmarshaler") @@ -1837,16 +1966,6 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P("return extRange_", ccTypeName) g.Out() g.P("}") - g.P("func (m *", ccTypeName, ") ExtensionMap() map[int32]", g.Pkg["proto"], ".Extension {") - g.In() - g.P("if m.XXX_extensions == nil {") - g.In() - g.P("m.XXX_extensions = make(map[int32]", g.Pkg["proto"], ".Extension)") - g.Out() - g.P("}") - g.P("return m.XXX_extensions") - g.Out() - g.P("}") } // Default constants @@ -2101,12 +2220,14 @@ func (g *Generator) generateMessage(message *Descriptor) { // method enc := "_" + ccTypeName + "_OneofMarshaler" dec := "_" + ccTypeName + "_OneofUnmarshaler" + size := "_" + ccTypeName + "_OneofSizer" encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" + sizeSig := "(msg " + g.Pkg["proto"] + ".Message) (n int)" g.P("// XXX_OneofFuncs is for the internal use of the proto package.") - g.P("func (*", ccTypeName, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", []interface{}) {") - g.P("return ", enc, ", ", dec, ", []interface{}{") + g.P("func (*", ccTypeName, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {") + g.P("return ", enc, ", ", dec, ", ", size, ", []interface{}{") for _, field := range message.Field { if field.OneofIndex == nil { continue @@ -2293,12 +2414,102 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P("}") g.P("}") g.P() + + // sizer + g.P("func ", size, sizeSig, " {") + g.P("m := msg.(*", ccTypeName, ")") + for oi, odp := range message.OneofDecl { + g.P("// ", odp.GetName()) + fname := oneofFieldName[int32(oi)] + g.P("switch x := m.", fname, ".(type) {") + for _, field := range message.Field 
{ + if field.OneofIndex == nil || int(*field.OneofIndex) != oi { + continue + } + g.P("case *", oneofTypeName[field], ":") + val := "x." + fieldNames[field] + var wire, varint, fixed string + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + wire = "WireFixed64" + fixed = "8" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + wire = "WireFixed32" + fixed = "4" + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM: + wire = "WireVarint" + varint = val + case descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + wire = "WireFixed64" + fixed = "8" + case descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + wire = "WireFixed32" + fixed = "4" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + wire = "WireVarint" + fixed = "1" + case descriptor.FieldDescriptorProto_TYPE_STRING: + wire = "WireBytes" + fixed = "len(" + val + ")" + varint = fixed + case descriptor.FieldDescriptorProto_TYPE_GROUP: + wire = "WireStartGroup" + fixed = g.Pkg["proto"] + ".Size(" + val + ")" + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + wire = "WireBytes" + g.P("s := ", g.Pkg["proto"], ".Size(", val, ")") + fixed = "s" + varint = fixed + case descriptor.FieldDescriptorProto_TYPE_BYTES: + wire = "WireBytes" + fixed = "len(" + val + ")" + varint = fixed + case descriptor.FieldDescriptorProto_TYPE_SINT32: + wire = "WireVarint" + varint = "(uint32(" + val + ") << 1) ^ uint32((int32(" + val + ") >> 31))" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + wire = "WireVarint" + varint = "uint64(" + val + " << 1) ^ uint64((int64(" + val + ") >> 63))" + default: + g.Fail("unhandled oneof field type ", field.Type.String()) + } + g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")") + if varint != "" { + g.P("n += ", g.Pkg["proto"], ".SizeVarint(uint64(", varint, "))") + } + if fixed != "" { + g.P("n += ", fixed) + } + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)") + } + } + g.P("case nil:") + g.P("default:") + g.P("panic(", g.Pkg["fmt"], ".Sprintf(\"proto: unexpected type %T in oneof\", x))") + g.P("}") + } + g.P("return n") + g.P("}") + g.P() } for _, ext := range message.ext { g.generateExtension(ext) } + fullName := strings.Join(message.TypeName(), ".") + if g.file.Package != nil { + fullName = *g.file.Package + "." + fullName + } + + g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], ccTypeName, fullName) } func (g *Generator) generateExtension(ext *ExtensionDescriptor) { @@ -2386,6 +2597,47 @@ func (g *Generator) generateInitFunction() { g.init = nil } +func (g *Generator) generateFileDescriptor(file *FileDescriptor) { + // Make a copy and trim source_code_info data. + // TODO: Trim this more when we know exactly what we need. 
+ pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto) + pb.SourceCodeInfo = nil + + b, err := proto.Marshal(pb) + if err != nil { + g.Fail(err.Error()) + } + + var buf bytes.Buffer + w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) + w.Write(b) + w.Close() + b = buf.Bytes() + + v := file.VarName() + g.P() + g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") + g.P("var ", v, " = []byte{") + g.In() + g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto") + for len(b) > 0 { + n := 16 + if n > len(b) { + n = len(b) + } + + s := "" + for _, c := range b[:n] { + s += fmt.Sprintf("0x%02x,", c) + } + g.P(s) + + b = b[n:] + } + g.Out() + g.P("}") +} + func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { // // We always print the full (proto-world) package name here. pkg := enum.File().GetPackage() @@ -2469,15 +2721,6 @@ func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, // dottedSlice turns a sliced name into a dotted name. func dottedSlice(elem []string) string { return strings.Join(elem, ".") } -// Given a .proto file name, return the output name for the generated Go program. -func goFileName(name string) string { - ext := path.Ext(name) - if ext == ".proto" || ext == ".protodevel" { - name = name[0 : len(name)-len(ext)] - } - return name + ".pb.go" -} - // Is this field optional? func isOptional(field *descriptor.FieldDescriptorProto) bool { return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL @@ -2493,6 +2736,32 @@ func isRepeated(field *descriptor.FieldDescriptorProto) bool { return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED } +// Is this field a scalar numeric type? +func isScalar(field *descriptor.FieldDescriptorProto) bool { + if field.Type == nil { + return false + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_BOOL, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_SFIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED64, + descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} + // badToUnderscore is the mapping function used to generate Go names from package names, // which can be dotted in the input .proto file. It replaces non-identifier characters such as // dot or dash with underscore. diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go new file mode 100644 index 00000000000..a5ebc853315 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go @@ -0,0 +1,85 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2013 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package generator + +import ( + "testing" + + "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +func TestCamelCase(t *testing.T) { + tests := []struct { + in, want string + }{ + {"one", "One"}, + {"one_two", "OneTwo"}, + {"_my_field_name_2", "XMyFieldName_2"}, + {"Something_Capped", "Something_Capped"}, + {"my_Name", "My_Name"}, + {"OneTwo", "OneTwo"}, + {"_", "X"}, + {"_a_", "XA_"}, + } + for _, tc := range tests { + if got := CamelCase(tc.in); got != tc.want { + t.Errorf("CamelCase(%q) = %q, want %q", tc.in, got, tc.want) + } + } +} + +func TestGoPackageOption(t *testing.T) { + tests := []struct { + in string + impPath, pkg string + ok bool + }{ + {"", "", "", false}, + {"foo", "", "foo", true}, + {"github.com/golang/bar", "github.com/golang/bar", "bar", true}, + {"github.com/golang/bar;baz", "github.com/golang/bar", "baz", true}, + } + for _, tc := range tests { + d := &FileDescriptor{ + FileDescriptorProto: &descriptor.FileDescriptorProto{ + Options: &descriptor.FileOptions{ + GoPackage: &tc.in, + }, + }, + } + impPath, pkg, ok := d.goPackageOption() + if impPath != tc.impPath || pkg != tc.pkg || ok != tc.ok { + t.Errorf("go_package = %q => (%q, %q, %t), want (%q, %q, %t)", tc.in, + impPath, pkg, ok, tc.impPath, tc.pkg, tc.ok) + } + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/internal/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go similarity index 89% rename from vendor/github.com/golang/protobuf/protoc-gen-go/internal/grpc/grpc.go rename to vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go index 2f7f29eba4c..a29a8d2c20d 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/internal/grpc/grpc.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go @@ -44,6 +44,12 @@ import ( "github.com/golang/protobuf/protoc-gen-go/generator" ) +// generatedCodeVersion indicates a version of the generated code. 
+// It is incremented whenever an incompatibility between the generated code and +// the grpc package is introduced; the generated code references +// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 3 + // Paths for packages used by code generated in this file, // relative to the import_prefix of the generator.Generator. const ( @@ -101,10 +107,18 @@ func (g *grpc) Generate(file *generator.FileDescriptor) { if len(file.FileDescriptorProto.Service) == 0 { return } + g.P("// Reference imports to suppress errors if they are not otherwise used.") g.P("var _ ", contextPkg, ".Context") g.P("var _ ", grpcPkg, ".ClientConn") g.P() + + // Assert version compatibility. + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the grpc package it is being compiled against.") + g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion) + g.P() + for i, service := range file.FileDescriptorProto.Service { g.generateService(file, service, i) } @@ -134,7 +148,10 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi path := fmt.Sprintf("6,%d", index) // 6 means service. origServName := service.GetName() - fullServName := file.GetPackage() + "." + origServName + fullServName := origServName + if pkg := file.GetPackage(); pkg != "" { + fullServName = pkg + "." + fullServName + } servName := generator.CamelCase(origServName) g.P() @@ -201,7 +218,7 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi // Server handler implementations. var handlerNames []string for _, method := range service.Method { - hname := g.generateServerMethod(servName, method) + hname := g.generateServerMethod(servName, fullServName, method) handlerNames = append(handlerNames, hname) } @@ -237,6 +254,7 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.P("},") } g.P("},") + g.P("Metadata: ", file.VarName(), ",") g.P("}") g.P() } @@ -361,19 +379,25 @@ func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescrip return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret } -func (g *grpc) generateServerMethod(servName string, method *pb.MethodDescriptorProto) string { +func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string { methName := generator.CamelCase(method.GetName()) hname := fmt.Sprintf("_%s_%s_Handler", servName, methName) inType := g.typeName(method.GetInputType()) outType := g.typeName(method.GetOutputType()) if !method.GetServerStreaming() && !method.GetClientStreaming() { - g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error) (interface{}, error) {") + g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {") g.P("in := new(", inType, ")") g.P("if err := dec(in); err != nil { return nil, err }") - g.P("out, err := srv.(", servName, "Server).", methName, "(ctx, in)") - g.P("if err != nil { return nil, err }") - g.P("return out, nil") + g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }") + g.P("info := &", grpcPkg, ".UnaryServerInfo{") + g.P("Server: srv,") + g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",") + g.P("}") + g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) 
(interface{}, error) {") + g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))") + g.P("}") + g.P("return interceptor(ctx, in, info, handler)") g.P("}") g.P() return hname diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go index 24e490ec4bb..532a550050e 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go @@ -31,4 +31,4 @@ package main -import _ "github.com/golang/protobuf/protoc-gen-go/internal/grpc" +import _ "github.com/golang/protobuf/protoc-gen-go/grpc" diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go index 52bff884879..0ff4e13a8fe 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go @@ -3,7 +3,7 @@ // DO NOT EDIT! /* -Package google_protobuf_compiler is a generated protocol buffer package. +Package plugin_go is a generated protocol buffer package. It is generated from these files: google/protobuf/compiler/plugin.proto @@ -12,7 +12,7 @@ It has these top-level messages: CodeGeneratorRequest CodeGeneratorResponse */ -package google_protobuf_compiler +package plugin_go import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -24,12 +24,18 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + // An encoded CodeGeneratorRequest is written to the plugin's stdin. type CodeGeneratorRequest struct { // The .proto files that were explicitly listed on the command-line. The // code generator should generate code only for these files. Each file's // descriptor will be included in proto_file, below. - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` // The generator parameter passed on the command-line. Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` // FileDescriptorProtos for all files in files_to_generate and everything @@ -43,13 +49,14 @@ type CodeGeneratorRequest struct { // the entire set into memory at once. However, as of this writing, this // is not similarly optimized on protoc's end -- it will store all fields in // memory at once before sending them to the plugin. 
- ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } -func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorRequest) ProtoMessage() {} +func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } +func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorRequest) ProtoMessage() {} +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *CodeGeneratorRequest) GetFileToGenerate() []string { if m != nil { @@ -87,9 +94,10 @@ type CodeGeneratorResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } -func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse) ProtoMessage() {} +func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } +func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse) ProtoMessage() {} +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *CodeGeneratorResponse) GetError() string { if m != nil && m.Error != nil { @@ -156,15 +164,16 @@ type CodeGeneratorResponse_File struct { // command line. // // If |insertion_point| is present, |name| must also be present. - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` // The file contents. 
Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } -func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse_File) ProtoMessage() {} +func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } +func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } func (m *CodeGeneratorResponse_File) GetName() string { if m != nil && m.Name != nil { @@ -186,3 +195,35 @@ func (m *CodeGeneratorResponse_File) GetContent() string { } return "" } + +func init() { + proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") + proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") + proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") +} + +func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 310 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x51, 0xc1, 0x4a, 0xc3, 0x40, + 0x10, 0x25, 0xb6, 0x22, 0x19, 0xa5, 0x95, 0xa5, 0xc2, 0x52, 0x7a, 0x08, 0x45, 0x31, 0xa7, 0x14, + 0x44, 0xf0, 0xde, 0x8a, 0x7a, 0x2c, 0xc1, 0x93, 0x20, 0x21, 0xa6, 0xd3, 0xb0, 0x90, 0xec, 0xac, + 0xb3, 0xdb, 0x2f, 0xf2, 0x9f, 0xfc, 0x1e, 0xd9, 0x4d, 0x5b, 0xa5, 0xd8, 0xdb, 0xce, 0x7b, 0x6f, + 0xe6, 0xbd, 0x9d, 0x81, 0x9b, 0x9a, 0xa8, 0x6e, 0x70, 0x66, 0x98, 0x1c, 0x7d, 0x6c, 0xd6, 0xb3, + 0x8a, 0x5a, 0xa3, 0x1a, 0xe4, 0x99, 0x69, 0x36, 0xb5, 0xd2, 0x59, 0x20, 0x84, 0xec, 0x64, 0xd9, + 0x4e, 0x96, 0xed, 0x64, 0xe3, 0xe4, 0x70, 0xc0, 0x0a, 0x6d, 0xc5, 0xca, 0x38, 0xe2, 0x4e, 0x3d, + 0xfd, 0x8a, 0x60, 0xb4, 0xa0, 0x15, 0x3e, 0xa3, 0x46, 0x2e, 0x1d, 0x71, 0x8e, 0x9f, 0x1b, 0xb4, + 0x4e, 0xa4, 0x70, 0xb9, 0x56, 0x0d, 0x16, 0x8e, 0x8a, 0xba, 0xe3, 0x50, 0x46, 0x49, 0x2f, 0x8d, + 0xf3, 0x81, 0xc7, 0x5f, 0x69, 0xdb, 0x81, 0x62, 0x02, 0xb1, 0x29, 0xb9, 0x6c, 0xd1, 0x21, 0xcb, + 0x93, 0x24, 0x4a, 0xe3, 0xfc, 0x17, 0x10, 0x0b, 0x80, 0xe0, 0x54, 0xf8, 0x2e, 0x39, 0x4c, 0x7a, + 0xe9, 0xf9, 0xdd, 0x75, 0x76, 0x98, 0xf8, 0x49, 0x35, 0xf8, 0xb8, 0xcf, 0xb6, 0xf4, 0x70, 0x1e, + 0x07, 0xd6, 0x33, 0xd3, 0xef, 0x08, 0xae, 0x0e, 0x52, 0x5a, 0x43, 0xda, 0xa2, 0x18, 0xc1, 0x29, + 0x32, 0x13, 0xcb, 0x28, 0x18, 0x77, 0x85, 0x78, 0x81, 0xfe, 0x1f, 0xbb, 0xfb, 0xec, 0xd8, 0x82, + 0xb2, 0x7f, 0x87, 0x86, 0x34, 0x79, 0x98, 0x30, 0x7e, 0x87, 0xbe, 0xaf, 0x84, 0x80, 0xbe, 0x2e, + 0x5b, 0xdc, 0xda, 0x84, 0xb7, 0xb8, 0x85, 0xa1, 0xd2, 0x16, 0xd9, 0x29, 0xd2, 0x85, 0x21, 0xa5, + 0xdd, 0xf6, 0xfb, 0x83, 0x3d, 0xbc, 0xf4, 0xa8, 0x90, 0x70, 0x56, 0x91, 0x76, 0xa8, 0x9d, 0x1c, + 0x06, 0xc1, 0xae, 0x9c, 0x3f, 0xc0, 0xa4, 0xa2, 0xf6, 0x68, 0xbe, 0xf9, 0xc5, 0x32, 0x1c, 0x3a, + 0x2c, 0xc4, 0xbe, 0xc5, 0xdd, 0xd9, 0x8b, 0x9a, 0x7e, 0x02, 0x00, 0x00, 0xff, 0xff, 0x83, 0x7b, + 0x5c, 0x7c, 0x1b, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile new file mode 100644 index 00000000000..105cdec594c --- /dev/null +++ 
b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile @@ -0,0 +1,73 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +all: + @echo run make test + +include ../../Make.protobuf + +test: golden testbuild + +#test: golden testbuild extension_test +# ./extension_test +# @echo PASS + +my_test/test.pb.go: my_test/test.proto + protoc --go_out=Mmulti/multi1.proto=github.com/golang/protobuf/protoc-gen-go/testdata/multi:. $< + +golden: + make -B my_test/test.pb.go + sed -i '/return.*fileDescriptor/d' my_test/test.pb.go + sed -i '/^var fileDescriptor/,/^}/d' my_test/test.pb.go + sed -i '/proto.RegisterFile.*fileDescriptor/d' my_test/test.pb.go + gofmt -w my_test/test.pb.go + diff -w my_test/test.pb.go my_test/test.pb.go.golden + +nuke: clean + +testbuild: regenerate + go test + +regenerate: + # Invoke protoc once to generate three independent .pb.go files in the same package. + protoc --go_out=. multi/multi{1,2,3}.proto + +#extension_test: extension_test.$O +# $(LD) -L. -o $@ $< + +#multi.a: multi3.pb.$O multi2.pb.$O multi1.pb.$O +# rm -f multi.a +# $(QUOTED_GOBIN)/gopack grc $@ $< + +#test.pb.go: imp.pb.go +#multi1.pb.go: multi2.pb.go multi3.pb.go +#main.$O: imp.pb.$O test.pb.$O multi.a +#extension_test.$O: extension_base.pb.$O extension_extra.pb.$O extension_user.pb.$O diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto new file mode 100644 index 00000000000..94acfc1bc92 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package extension_base; + +message BaseMessage { + optional int32 height = 1; + extensions 4 to 9; + extensions 16 to max; +} + +// Another message that may be extended, using message_set_wire_format. +message OldStyleMessage { + option message_set_wire_format = true; + extensions 100 to max; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto new file mode 100644 index 00000000000..fca7f600cc7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto @@ -0,0 +1,38 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package extension_extra; + +message ExtraMessage { + optional int32 width = 1; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go new file mode 100644 index 00000000000..86e9c118a6f --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go @@ -0,0 +1,210 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test that we can use protocol buffers that use extensions. + +package testdata + +/* + +import ( + "bytes" + "regexp" + "testing" + + "github.com/golang/protobuf/proto" + base "extension_base.pb" + user "extension_user.pb" +) + +func TestSingleFieldExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(178), + } + + // Use extension within scope of another type. 
+ vol := proto.Uint32(11) + err := proto.SetExtension(bm, user.E_LoudMessage_Volume, vol) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_LoudMessage_Volume) { + t.Fatal("Decoded message didn't contain extension.") + } + vol_out, err := proto.GetExtension(bm_new, user.E_LoudMessage_Volume) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if v := vol_out.(*uint32); *v != *vol { + t.Errorf("vol_out = %v, expected %v", *v, *vol) + } + proto.ClearExtension(bm_new, user.E_LoudMessage_Volume) + if proto.HasExtension(bm_new, user.E_LoudMessage_Volume) { + t.Fatal("Failed clearing extension.") + } +} + +func TestMessageExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(179), + } + + // Use extension that is itself a message. + um := &user.UserMessage{ + Name: proto.String("Dave"), + Rank: proto.String("Major"), + } + err := proto.SetExtension(bm, user.E_LoginMessage_UserMessage, um) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) { + t.Fatal("Decoded message didn't contain extension.") + } + um_out, err := proto.GetExtension(bm_new, user.E_LoginMessage_UserMessage) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if n := um_out.(*user.UserMessage).Name; *n != *um.Name { + t.Errorf("um_out.Name = %q, expected %q", *n, *um.Name) + } + if r := um_out.(*user.UserMessage).Rank; *r != *um.Rank { + t.Errorf("um_out.Rank = %q, expected %q", *r, *um.Rank) + } + proto.ClearExtension(bm_new, user.E_LoginMessage_UserMessage) + if proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) { + t.Fatal("Failed clearing extension.") + } +} + +func TestTopLevelExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(179), + } + + width := proto.Int32(17) + err := proto.SetExtension(bm, user.E_Width, width) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_Width) { + t.Fatal("Decoded message didn't contain extension.") + } + width_out, err := proto.GetExtension(bm_new, user.E_Width) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if w := width_out.(*int32); *w != *width { + t.Errorf("width_out = %v, expected %v", *w, *width) + } + proto.ClearExtension(bm_new, user.E_Width) + if proto.HasExtension(bm_new, user.E_Width) { + t.Fatal("Failed clearing extension.") + } +} + +func TestMessageSetWireFormat(t *testing.T) { + osm := new(base.OldStyleMessage) + osp := &user.OldStyleParcel{ + Name: proto.String("Dave"), + Height: proto.Int32(178), + } + + err := proto.SetExtension(osm, user.E_OldStyleParcel_MessageSetExtension, osp) + if err != nil { + 
t.Fatal("Failed setting extension:", err) + } + + buf, err := proto.Marshal(osm) + if err != nil { + t.Fatal("Failed encoding message:", err) + } + + // Data generated from Python implementation. + expected := []byte{ + 11, 16, 209, 15, 26, 9, 10, 4, 68, 97, 118, 101, 16, 178, 1, 12, + } + + if !bytes.Equal(expected, buf) { + t.Errorf("Encoding mismatch.\nwant %+v\n got %+v", expected, buf) + } + + // Check that it is restored correctly. + osm = new(base.OldStyleMessage) + if err := proto.Unmarshal(buf, osm); err != nil { + t.Fatal("Failed decoding message:", err) + } + osp_out, err := proto.GetExtension(osm, user.E_OldStyleParcel_MessageSetExtension) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + osp = osp_out.(*user.OldStyleParcel) + if *osp.Name != "Dave" || *osp.Height != 178 { + t.Errorf("Retrieved extension from decoded message is not correct: %+v", osp) + } +} + +func main() { + // simpler than rigging up gotest + testing.Main(regexp.MatchString, []testing.InternalTest{ + {"TestSingleFieldExtension", TestSingleFieldExtension}, + {"TestMessageExtension", TestMessageExtension}, + {"TestTopLevelExtension", TestTopLevelExtension}, + }, + []testing.InternalBenchmark{}, + []testing.InternalExample{}) +} + +*/ diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto new file mode 100644 index 00000000000..ff65873dd2d --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; + +import "extension_base.proto"; +import "extension_extra.proto"; + +package extension_user; + +message UserMessage { + optional string name = 1; + optional string rank = 2; +} + +// Extend with a message +extend extension_base.BaseMessage { + optional UserMessage user_message = 5; +} + +// Extend with a foreign message +extend extension_base.BaseMessage { + optional extension_extra.ExtraMessage extra_message = 9; +} + +// Extend with some primitive types +extend extension_base.BaseMessage { + optional int32 width = 6; + optional int64 area = 7; +} + +// Extend inside the scope of another type +message LoudMessage { + extend extension_base.BaseMessage { + optional uint32 volume = 8; + } + extensions 100 to max; +} + +// Extend inside the scope of another type, using a message. +message LoginMessage { + extend extension_base.BaseMessage { + optional UserMessage user_message = 16; + } +} + +// Extend with a repeated field +extend extension_base.BaseMessage { + repeated Detail detail = 17; +} + +message Detail { + optional string color = 1; +} + +// An extension of an extension +message Announcement { + optional string words = 1; + extend LoudMessage { + optional Announcement loud_ext = 100; + } +} + +// Something that can be put in a message set. +message OldStyleParcel { + extend extension_base.OldStyleMessage { + optional OldStyleParcel message_set_extension = 2001; + } + + required string name = 1; + optional int32 height = 2; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto new file mode 100644 index 00000000000..b8bc41acd87 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto @@ -0,0 +1,59 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package grpc.testing; + +message SimpleRequest { +} + +message SimpleResponse { +} + +message StreamMsg { +} + +message StreamMsg2 { +} + +service Test { + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // This RPC streams from the server only. + rpc Downstream(SimpleRequest) returns (stream StreamMsg); + + // This RPC streams from the client. + rpc Upstream(stream StreamMsg) returns (SimpleResponse); + + // This one streams in both directions. + rpc Bidi(stream StreamMsg) returns (stream StreamMsg2); +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden new file mode 100644 index 00000000000..784a4f865a5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden @@ -0,0 +1,113 @@ +// Code generated by protoc-gen-go. +// source: imp.proto +// DO NOT EDIT! + +package imp + +import proto "github.com/golang/protobuf/proto" +import "math" +import "os" +import imp1 "imp2.pb" + +// Reference proto & math imports to suppress error if they are not otherwise used. +var _ = proto.GetString +var _ = math.Inf + +// Types from public import imp2.proto +type PubliclyImportedMessage imp1.PubliclyImportedMessage + +func (this *PubliclyImportedMessage) Reset() { (*imp1.PubliclyImportedMessage)(this).Reset() } +func (this *PubliclyImportedMessage) String() string { + return (*imp1.PubliclyImportedMessage)(this).String() +} + +// PubliclyImportedMessage from public import imp.proto + +type ImportedMessage_Owner int32 + +const ( + ImportedMessage_DAVE ImportedMessage_Owner = 1 + ImportedMessage_MIKE ImportedMessage_Owner = 2 +) + +var ImportedMessage_Owner_name = map[int32]string{ + 1: "DAVE", + 2: "MIKE", +} +var ImportedMessage_Owner_value = map[string]int32{ + "DAVE": 1, + "MIKE": 2, +} + +// NewImportedMessage_Owner is deprecated. Use x.Enum() instead. 
+func NewImportedMessage_Owner(x ImportedMessage_Owner) *ImportedMessage_Owner { + e := ImportedMessage_Owner(x) + return &e +} +func (x ImportedMessage_Owner) Enum() *ImportedMessage_Owner { + p := new(ImportedMessage_Owner) + *p = x + return p +} +func (x ImportedMessage_Owner) String() string { + return proto.EnumName(ImportedMessage_Owner_name, int32(x)) +} + +type ImportedMessage struct { + Field *int64 `protobuf:"varint,1,req,name=field" json:"field,omitempty"` + XXX_extensions map[int32][]byte `json:",omitempty"` + XXX_unrecognized []byte `json:",omitempty"` +} + +func (this *ImportedMessage) Reset() { *this = ImportedMessage{} } +func (this *ImportedMessage) String() string { return proto.CompactTextString(this) } + +var extRange_ImportedMessage = []proto.ExtensionRange{ + proto.ExtensionRange{90, 100}, +} + +func (*ImportedMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ImportedMessage +} +func (this *ImportedMessage) ExtensionMap() map[int32][]byte { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32][]byte) + } + return this.XXX_extensions +} + +type ImportedExtendable struct { + XXX_extensions map[int32][]byte `json:",omitempty"` + XXX_unrecognized []byte `json:",omitempty"` +} + +func (this *ImportedExtendable) Reset() { *this = ImportedExtendable{} } +func (this *ImportedExtendable) String() string { return proto.CompactTextString(this) } + +func (this *ImportedExtendable) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(this.ExtensionMap()) +} +func (this *ImportedExtendable) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, this.ExtensionMap()) +} +// ensure ImportedExtendable satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*ImportedExtendable)(nil) +var _ proto.Unmarshaler = (*ImportedExtendable)(nil) + +var extRange_ImportedExtendable = []proto.ExtensionRange{ + proto.ExtensionRange{100, 536870911}, +} + +func (*ImportedExtendable) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ImportedExtendable +} +func (this *ImportedExtendable) ExtensionMap() map[int32][]byte { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32][]byte) + } + return this.XXX_extensions +} + +func init() { + proto.RegisterEnum("imp.ImportedMessage_Owner", ImportedMessage_Owner_name, ImportedMessage_Owner_value) +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto new file mode 100644 index 00000000000..156e078d125 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto @@ -0,0 +1,70 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package imp; + +import "imp2.proto"; +import "imp3.proto"; + +message ImportedMessage { + required int64 field = 1; + + // The forwarded getters for these fields are fiddly to get right. + optional ImportedMessage2 local_msg = 2; + optional ForeignImportedMessage foreign_msg = 3; // in imp3.proto + optional Owner enum_field = 4; + oneof union { + int32 state = 9; + } + + repeated string name = 5; + repeated Owner boss = 6; + repeated ImportedMessage2 memo = 7; + + map<string, ImportedMessage2> msg_map = 8; + + enum Owner { + DAVE = 1; + MIKE = 2; + } + + extensions 90 to 100; +} + +message ImportedMessage2 { +} + +message ImportedExtendable { + option message_set_wire_format = true; + extensions 100 to max; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto new file mode 100644 index 00000000000..3bb0632b295 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto @@ -0,0 +1,43 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package imp; + +message PubliclyImportedMessage { + optional int64 field = 1; +} + +enum PubliclyImportedEnum { + GLASSES = 1; + HAIR = 2; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto new file mode 100644 index 00000000000..58fc7598bf9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto @@ -0,0 +1,38 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package imp; + +message ForeignImportedMessage { + optional string tuber = 1; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go new file mode 100644 index 00000000000..f9b5ccf2032 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A simple binary to link together the protocol buffers in this test. + +package testdata + +import ( + "testing" + + mytestpb "./my_test" + multipb "github.com/golang/protobuf/protoc-gen-go/testdata/multi" +) + +func TestLink(t *testing.T) { + _ = &multipb.Multi1{} + _ = &mytestpb.Request{} +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto new file mode 100644 index 00000000000..0da6e0af4b2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto @@ -0,0 +1,44 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +import "multi/multi2.proto"; +import "multi/multi3.proto"; + +package multitest; + +message Multi1 { + required Multi2 multi2 = 1; + optional Multi2.Color color = 2; + optional Multi3.HatType hat_type = 3; +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto new file mode 100644 index 00000000000..e6bfc71b3c0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package multitest; + +message Multi2 { + required int32 required_value = 1; + + enum Color { + BLUE = 1; + GREEN = 2; + RED = 3; + }; + optional Color color = 2; +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto new file mode 100644 index 00000000000..146c255bd21 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto @@ -0,0 +1,43 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package multitest; + +message Multi3 { + enum HatType { + FEDORA = 1; + FEZ = 2; + }; + optional HatType hat_type = 1; +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go new file mode 100644 index 00000000000..9ec3e1292e1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go @@ -0,0 +1,866 @@ +// Code generated by protoc-gen-go. +// source: my_test/test.proto +// DO NOT EDIT! + +/* +Package my_test is a generated protocol buffer package. + +This package holds interesting messages. + +It is generated from these files: + my_test/test.proto + +It has these top-level messages: + Request + Reply + OtherBase + ReplyExtensions + OtherReplyExtensions + OldReply + Communique +*/ +package my_test + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HatType int32 + +const ( + // deliberately skipping 0 + HatType_FEDORA HatType = 1 + HatType_FEZ HatType = 2 +) + +var HatType_name = map[int32]string{ + 1: "FEDORA", + 2: "FEZ", +} +var HatType_value = map[string]int32{ + "FEDORA": 1, + "FEZ": 2, +} + +func (x HatType) Enum() *HatType { + p := new(HatType) + *p = x + return p +} +func (x HatType) String() string { + return proto.EnumName(HatType_name, int32(x)) +} +func (x *HatType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType") + if err != nil { + return err + } + *x = HatType(value) + return nil +} + +// This enum represents days of the week. +type Days int32 + +const ( + Days_MONDAY Days = 1 + Days_TUESDAY Days = 2 + Days_LUNDI Days = 1 +) + +var Days_name = map[int32]string{ + 1: "MONDAY", + 2: "TUESDAY", + // Duplicate value: 1: "LUNDI", +} +var Days_value = map[string]int32{ + "MONDAY": 1, + "TUESDAY": 2, + "LUNDI": 1, +} + +func (x Days) Enum() *Days { + p := new(Days) + *p = x + return p +} +func (x Days) String() string { + return proto.EnumName(Days_name, int32(x)) +} +func (x *Days) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days") + if err != nil { + return err + } + *x = Days(value) + return nil +} + +type Request_Color int32 + +const ( + Request_RED Request_Color = 0 + Request_GREEN Request_Color = 1 + Request_BLUE Request_Color = 2 +) + +var Request_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Request_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Request_Color) Enum() *Request_Color { + p := new(Request_Color) + *p = x + return p +} +func (x Request_Color) String() string { + return proto.EnumName(Request_Color_name, int32(x)) +} +func (x *Request_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color") + if err != nil { + return err + } + *x = Request_Color(value) + return nil +} + +type Reply_Entry_Game int32 + +const ( + Reply_Entry_FOOTBALL Reply_Entry_Game = 1 + Reply_Entry_TENNIS Reply_Entry_Game = 2 +) + +var Reply_Entry_Game_name = map[int32]string{ + 1: "FOOTBALL", + 2: "TENNIS", +} +var Reply_Entry_Game_value = map[string]int32{ + "FOOTBALL": 1, + "TENNIS": 2, +} + +func (x Reply_Entry_Game) Enum() *Reply_Entry_Game { + p := new(Reply_Entry_Game) + *p = x + return p +} +func (x Reply_Entry_Game) String() string { + return proto.EnumName(Reply_Entry_Game_name, int32(x)) +} +func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game") + if err != nil { + return err + } + *x = Reply_Entry_Game(value) + return nil +} + +// This is a message that might be sent somewhere. +type Request struct { + Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"` + // optional imp.ImportedMessage imported_message = 2; + Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"` + Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` + // optional imp.ImportedMessage.Owner owner = 6; + Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` + Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This is a map field. 
It will generate map[int32]string. + NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // This is a map field whose value type is a message. + MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` + // This field should not conflict with any getters. + GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} + +const Default_Request_Hat HatType = HatType_FEDORA + +var Default_Request_Deadline float32 = float32(math.Inf(1)) + +func (m *Request) GetKey() []int64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Request) GetHue() Request_Color { + if m != nil && m.Hue != nil { + return *m.Hue + } + return Request_RED +} + +func (m *Request) GetHat() HatType { + if m != nil && m.Hat != nil { + return *m.Hat + } + return Default_Request_Hat +} + +func (m *Request) GetDeadline() float32 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return Default_Request_Deadline +} + +func (m *Request) GetSomegroup() *Request_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *Request) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *Request) GetMsgMapping() map[int64]*Reply { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *Request) GetReset_() int32 { + if m != nil && m.Reset_ != nil { + return *m.Reset_ + } + return 0 +} + +func (m *Request) GetGetKey_() string { + if m != nil && m.GetKey_ != nil { + return *m.GetKey_ + } + return "" +} + +type Request_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } +func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Request_SomeGroup) ProtoMessage() {} + +func (m *Request_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Reply struct { + Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply) Reset() { *m = Reply{} } +func (m *Reply) String() string { return proto.CompactTextString(m) } +func (*Reply) ProtoMessage() {} + +var extRange_Reply = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Reply +} + +func (m *Reply) GetFound() []*Reply_Entry { + if m != nil { + return m.Found + } + return nil +} + +func (m *Reply) GetCompactKeys() []int32 { + if m != nil { + return m.CompactKeys + } + return nil +} + +type Reply_Entry struct { + KeyThatNeeds_1234Camel_CasIng *int64 
`protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=myFieldName2" json:"_my_field_name_2,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } +func (m *Reply_Entry) String() string { return proto.CompactTextString(m) } +func (*Reply_Entry) ProtoMessage() {} + +const Default_Reply_Entry_Value int64 = 7 + +func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 { + if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil { + return *m.KeyThatNeeds_1234Camel_CasIng + } + return 0 +} + +func (m *Reply_Entry) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return Default_Reply_Entry_Value +} + +func (m *Reply_Entry) GetXMyFieldName_2() int64 { + if m != nil && m.XMyFieldName_2 != nil { + return *m.XMyFieldName_2 + } + return 0 +} + +type OtherBase struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherBase) Reset() { *m = OtherBase{} } +func (m *OtherBase) String() string { return proto.CompactTextString(m) } +func (*OtherBase) ProtoMessage() {} + +var extRange_OtherBase = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherBase +} + +func (m *OtherBase) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type ReplyExtensions struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } +func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*ReplyExtensions) ProtoMessage() {} + +var E_ReplyExtensions_Time = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.time", + Tag: "fixed64,101,opt,name=time", +} + +var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 105, + Name: "my.test.ReplyExtensions.carrot", + Tag: "bytes,105,opt,name=carrot", +} + +var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ + ExtendedType: (*OtherBase)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.donut", + Tag: "bytes,101,opt,name=donut", +} + +type OtherReplyExtensions struct { + Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } +func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*OtherReplyExtensions) ProtoMessage() {} + +func (m *OtherReplyExtensions) GetKey() int32 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +type OldReply struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldReply) Reset() { *m = OldReply{} } +func (m *OldReply) String() string { return proto.CompactTextString(m) } +func (*OldReply) ProtoMessage() {} + +func (m *OldReply) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(&m.XXX_InternalExtensions) +} +func (m *OldReply) Unmarshal(buf []byte) error { + return 
proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) +} +func (m *OldReply) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *OldReply) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*OldReply)(nil) +var _ proto.Unmarshaler = (*OldReply)(nil) + +var extRange_OldReply = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OldReply +} + +type Communique struct { + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Height + // *Communique_Today + // *Communique_Maybe + // *Communique_Delta_ + // *Communique_Msg + // *Communique_Somegroup + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Height struct { + Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"` +} +type Communique_Today struct { + Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"` +} +type Communique_Maybe struct { + Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"` +} +type Communique_Delta_ struct { + Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"` +} +type Communique_Msg struct { + Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"` +} +type Communique_Somegroup struct { + Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Height) isCommunique_Union() {} +func (*Communique_Today) isCommunique_Union() {} +func (*Communique_Maybe) isCommunique_Union() {} +func (*Communique_Delta_) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} +func (*Communique_Somegroup) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil 
+} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetHeight() float32 { + if x, ok := m.GetUnion().(*Communique_Height); ok { + return x.Height + } + return 0 +} + +func (m *Communique) GetToday() Days { + if x, ok := m.GetUnion().(*Communique_Today); ok { + return x.Today + } + return Days_MONDAY +} + +func (m *Communique) GetMaybe() bool { + if x, ok := m.GetUnion().(*Communique_Maybe); ok { + return x.Maybe + } + return false +} + +func (m *Communique) GetDelta() int32 { + if x, ok := m.GetUnion().(*Communique_Delta_); ok { + return x.Delta + } + return 0 +} + +func (m *Communique) GetMsg() *Reply { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +func (m *Communique) GetSomegroup() *Communique_SomeGroup { + if x, ok := m.GetUnion().(*Communique_Somegroup); ok { + return x.Somegroup + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Height)(nil), + (*Communique_Today)(nil), + (*Communique_Maybe)(nil), + (*Communique_Delta_)(nil), + (*Communique_Msg)(nil), + (*Communique_Somegroup)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Height: + b.EncodeVarint(9<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.Height))) + case *Communique_Today: + b.EncodeVarint(10<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Today)) + case *Communique_Maybe: + t := uint64(0) + if x.Maybe { + t = 1 + } + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Communique_Delta_: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.Delta)) + case *Communique_Msg: + b.EncodeVarint(13<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case *Communique_Somegroup: + b.EncodeVarint(14<<3 | proto.WireStartGroup) + if err := b.Marshal(x.Somegroup); err != nil { + return err + } + b.EncodeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := 
b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.height + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Communique_Height{math.Float32frombits(uint32(x))} + return true, err + case 10: // union.today + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Today{Days(x)} + return true, err + case 11: // union.maybe + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Maybe{x != 0} + return true, err + case 12: // union.delta + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Communique_Delta_{int32(x)} + return true, err + case 13: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Reply) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + case 14: // union.somegroup + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Communique_SomeGroup) + err := b.DecodeGroup(msg) + m.Union = &Communique_Somegroup{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 8 + case *Communique_Height: + n += proto.SizeVarint(9<<3 | proto.WireFixed32) + n += 4 + case *Communique_Today: + n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Today)) + case *Communique_Maybe: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += 1 + case *Communique_Delta_: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31)))) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += proto.SizeVarint(13<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Communique_Somegroup: + n += proto.SizeVarint(14<<3 | proto.WireStartGroup) + n += proto.Size(x.Somegroup) + n += proto.SizeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Communique_SomeGroup struct { + Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} } +func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) } +func 
(*Communique_SomeGroup) ProtoMessage() {} + +func (m *Communique_SomeGroup) GetMember() string { + if m != nil && m.Member != nil { + return *m.Member + } + return "" +} + +type Communique_Delta struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_Delta) Reset() { *m = Communique_Delta{} } +func (m *Communique_Delta) String() string { return proto.CompactTextString(m) } +func (*Communique_Delta) ProtoMessage() {} + +var E_Tag = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*string)(nil), + Field: 103, + Name: "my.test.tag", + Tag: "bytes,103,opt,name=tag", +} + +var E_Donut = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*OtherReplyExtensions)(nil), + Field: 106, + Name: "my.test.donut", + Tag: "bytes,106,opt,name=donut", +} + +func init() { + proto.RegisterType((*Request)(nil), "my.test.Request") + proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup") + proto.RegisterType((*Reply)(nil), "my.test.Reply") + proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry") + proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase") + proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions") + proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions") + proto.RegisterType((*OldReply)(nil), "my.test.OldReply") + proto.RegisterType((*Communique)(nil), "my.test.Communique") + proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup") + proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta") + proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value) + proto.RegisterEnum("my.test.Days", Days_name, Days_value) + proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value) + proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value) + proto.RegisterExtension(E_ReplyExtensions_Time) + proto.RegisterExtension(E_ReplyExtensions_Carrot) + proto.RegisterExtension(E_ReplyExtensions_Donut) + proto.RegisterExtension(E_Tag) + proto.RegisterExtension(E_Donut) +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden new file mode 100644 index 00000000000..9ec3e1292e1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden @@ -0,0 +1,866 @@ +// Code generated by protoc-gen-go. +// source: my_test/test.proto +// DO NOT EDIT! + +/* +Package my_test is a generated protocol buffer package. + +This package holds interesting messages. + +It is generated from these files: + my_test/test.proto + +It has these top-level messages: + Request + Reply + OtherBase + ReplyExtensions + OtherReplyExtensions + OldReply + Communique +*/ +package my_test + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HatType int32 + +const ( + // deliberately skipping 0 + HatType_FEDORA HatType = 1 + HatType_FEZ HatType = 2 +) + +var HatType_name = map[int32]string{ + 1: "FEDORA", + 2: "FEZ", +} +var HatType_value = map[string]int32{ + "FEDORA": 1, + "FEZ": 2, +} + +func (x HatType) Enum() *HatType { + p := new(HatType) + *p = x + return p +} +func (x HatType) String() string { + return proto.EnumName(HatType_name, int32(x)) +} +func (x *HatType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType") + if err != nil { + return err + } + *x = HatType(value) + return nil +} + +// This enum represents days of the week. +type Days int32 + +const ( + Days_MONDAY Days = 1 + Days_TUESDAY Days = 2 + Days_LUNDI Days = 1 +) + +var Days_name = map[int32]string{ + 1: "MONDAY", + 2: "TUESDAY", + // Duplicate value: 1: "LUNDI", +} +var Days_value = map[string]int32{ + "MONDAY": 1, + "TUESDAY": 2, + "LUNDI": 1, +} + +func (x Days) Enum() *Days { + p := new(Days) + *p = x + return p +} +func (x Days) String() string { + return proto.EnumName(Days_name, int32(x)) +} +func (x *Days) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days") + if err != nil { + return err + } + *x = Days(value) + return nil +} + +type Request_Color int32 + +const ( + Request_RED Request_Color = 0 + Request_GREEN Request_Color = 1 + Request_BLUE Request_Color = 2 +) + +var Request_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Request_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Request_Color) Enum() *Request_Color { + p := new(Request_Color) + *p = x + return p +} +func (x Request_Color) String() string { + return proto.EnumName(Request_Color_name, int32(x)) +} +func (x *Request_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color") + if err != nil { + return err + } + *x = Request_Color(value) + return nil +} + +type Reply_Entry_Game int32 + +const ( + Reply_Entry_FOOTBALL Reply_Entry_Game = 1 + Reply_Entry_TENNIS Reply_Entry_Game = 2 +) + +var Reply_Entry_Game_name = map[int32]string{ + 1: "FOOTBALL", + 2: "TENNIS", +} +var Reply_Entry_Game_value = map[string]int32{ + "FOOTBALL": 1, + "TENNIS": 2, +} + +func (x Reply_Entry_Game) Enum() *Reply_Entry_Game { + p := new(Reply_Entry_Game) + *p = x + return p +} +func (x Reply_Entry_Game) String() string { + return proto.EnumName(Reply_Entry_Game_name, int32(x)) +} +func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game") + if err != nil { + return err + } + *x = Reply_Entry_Game(value) + return nil +} + +// This is a message that might be sent somewhere. +type Request struct { + Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"` + // optional imp.ImportedMessage imported_message = 2; + Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"` + Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` + // optional imp.ImportedMessage.Owner owner = 6; + Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` + Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This is a map field. 
It will generate map[int32]string. + NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // This is a map field whose value type is a message. + MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` + // This field should not conflict with any getters. + GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} + +const Default_Request_Hat HatType = HatType_FEDORA + +var Default_Request_Deadline float32 = float32(math.Inf(1)) + +func (m *Request) GetKey() []int64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Request) GetHue() Request_Color { + if m != nil && m.Hue != nil { + return *m.Hue + } + return Request_RED +} + +func (m *Request) GetHat() HatType { + if m != nil && m.Hat != nil { + return *m.Hat + } + return Default_Request_Hat +} + +func (m *Request) GetDeadline() float32 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return Default_Request_Deadline +} + +func (m *Request) GetSomegroup() *Request_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *Request) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *Request) GetMsgMapping() map[int64]*Reply { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *Request) GetReset_() int32 { + if m != nil && m.Reset_ != nil { + return *m.Reset_ + } + return 0 +} + +func (m *Request) GetGetKey_() string { + if m != nil && m.GetKey_ != nil { + return *m.GetKey_ + } + return "" +} + +type Request_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } +func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Request_SomeGroup) ProtoMessage() {} + +func (m *Request_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Reply struct { + Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply) Reset() { *m = Reply{} } +func (m *Reply) String() string { return proto.CompactTextString(m) } +func (*Reply) ProtoMessage() {} + +var extRange_Reply = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Reply +} + +func (m *Reply) GetFound() []*Reply_Entry { + if m != nil { + return m.Found + } + return nil +} + +func (m *Reply) GetCompactKeys() []int32 { + if m != nil { + return m.CompactKeys + } + return nil +} + +type Reply_Entry struct { + KeyThatNeeds_1234Camel_CasIng *int64 
`protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=myFieldName2" json:"_my_field_name_2,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } +func (m *Reply_Entry) String() string { return proto.CompactTextString(m) } +func (*Reply_Entry) ProtoMessage() {} + +const Default_Reply_Entry_Value int64 = 7 + +func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 { + if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil { + return *m.KeyThatNeeds_1234Camel_CasIng + } + return 0 +} + +func (m *Reply_Entry) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return Default_Reply_Entry_Value +} + +func (m *Reply_Entry) GetXMyFieldName_2() int64 { + if m != nil && m.XMyFieldName_2 != nil { + return *m.XMyFieldName_2 + } + return 0 +} + +type OtherBase struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherBase) Reset() { *m = OtherBase{} } +func (m *OtherBase) String() string { return proto.CompactTextString(m) } +func (*OtherBase) ProtoMessage() {} + +var extRange_OtherBase = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherBase +} + +func (m *OtherBase) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type ReplyExtensions struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } +func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*ReplyExtensions) ProtoMessage() {} + +var E_ReplyExtensions_Time = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.time", + Tag: "fixed64,101,opt,name=time", +} + +var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 105, + Name: "my.test.ReplyExtensions.carrot", + Tag: "bytes,105,opt,name=carrot", +} + +var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ + ExtendedType: (*OtherBase)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.donut", + Tag: "bytes,101,opt,name=donut", +} + +type OtherReplyExtensions struct { + Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } +func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*OtherReplyExtensions) ProtoMessage() {} + +func (m *OtherReplyExtensions) GetKey() int32 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +type OldReply struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldReply) Reset() { *m = OldReply{} } +func (m *OldReply) String() string { return proto.CompactTextString(m) } +func (*OldReply) ProtoMessage() {} + +func (m *OldReply) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(&m.XXX_InternalExtensions) +} +func (m *OldReply) Unmarshal(buf []byte) error { + return 
proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) +} +func (m *OldReply) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *OldReply) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*OldReply)(nil) +var _ proto.Unmarshaler = (*OldReply)(nil) + +var extRange_OldReply = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OldReply +} + +type Communique struct { + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Height + // *Communique_Today + // *Communique_Maybe + // *Communique_Delta_ + // *Communique_Msg + // *Communique_Somegroup + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Height struct { + Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"` +} +type Communique_Today struct { + Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"` +} +type Communique_Maybe struct { + Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"` +} +type Communique_Delta_ struct { + Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"` +} +type Communique_Msg struct { + Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"` +} +type Communique_Somegroup struct { + Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Height) isCommunique_Union() {} +func (*Communique_Today) isCommunique_Union() {} +func (*Communique_Maybe) isCommunique_Union() {} +func (*Communique_Delta_) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} +func (*Communique_Somegroup) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil 
+} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetHeight() float32 { + if x, ok := m.GetUnion().(*Communique_Height); ok { + return x.Height + } + return 0 +} + +func (m *Communique) GetToday() Days { + if x, ok := m.GetUnion().(*Communique_Today); ok { + return x.Today + } + return Days_MONDAY +} + +func (m *Communique) GetMaybe() bool { + if x, ok := m.GetUnion().(*Communique_Maybe); ok { + return x.Maybe + } + return false +} + +func (m *Communique) GetDelta() int32 { + if x, ok := m.GetUnion().(*Communique_Delta_); ok { + return x.Delta + } + return 0 +} + +func (m *Communique) GetMsg() *Reply { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +func (m *Communique) GetSomegroup() *Communique_SomeGroup { + if x, ok := m.GetUnion().(*Communique_Somegroup); ok { + return x.Somegroup + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Height)(nil), + (*Communique_Today)(nil), + (*Communique_Maybe)(nil), + (*Communique_Delta_)(nil), + (*Communique_Msg)(nil), + (*Communique_Somegroup)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Height: + b.EncodeVarint(9<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.Height))) + case *Communique_Today: + b.EncodeVarint(10<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Today)) + case *Communique_Maybe: + t := uint64(0) + if x.Maybe { + t = 1 + } + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Communique_Delta_: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.Delta)) + case *Communique_Msg: + b.EncodeVarint(13<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case *Communique_Somegroup: + b.EncodeVarint(14<<3 | proto.WireStartGroup) + if err := b.Marshal(x.Somegroup); err != nil { + return err + } + b.EncodeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := 
b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.height + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Communique_Height{math.Float32frombits(uint32(x))} + return true, err + case 10: // union.today + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Today{Days(x)} + return true, err + case 11: // union.maybe + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Maybe{x != 0} + return true, err + case 12: // union.delta + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Communique_Delta_{int32(x)} + return true, err + case 13: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Reply) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + case 14: // union.somegroup + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Communique_SomeGroup) + err := b.DecodeGroup(msg) + m.Union = &Communique_Somegroup{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 8 + case *Communique_Height: + n += proto.SizeVarint(9<<3 | proto.WireFixed32) + n += 4 + case *Communique_Today: + n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Today)) + case *Communique_Maybe: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += 1 + case *Communique_Delta_: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31)))) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += proto.SizeVarint(13<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Communique_Somegroup: + n += proto.SizeVarint(14<<3 | proto.WireStartGroup) + n += proto.Size(x.Somegroup) + n += proto.SizeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Communique_SomeGroup struct { + Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} } +func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) } +func 
(*Communique_SomeGroup) ProtoMessage() {} + +func (m *Communique_SomeGroup) GetMember() string { + if m != nil && m.Member != nil { + return *m.Member + } + return "" +} + +type Communique_Delta struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_Delta) Reset() { *m = Communique_Delta{} } +func (m *Communique_Delta) String() string { return proto.CompactTextString(m) } +func (*Communique_Delta) ProtoMessage() {} + +var E_Tag = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*string)(nil), + Field: 103, + Name: "my.test.tag", + Tag: "bytes,103,opt,name=tag", +} + +var E_Donut = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*OtherReplyExtensions)(nil), + Field: 106, + Name: "my.test.donut", + Tag: "bytes,106,opt,name=donut", +} + +func init() { + proto.RegisterType((*Request)(nil), "my.test.Request") + proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup") + proto.RegisterType((*Reply)(nil), "my.test.Reply") + proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry") + proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase") + proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions") + proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions") + proto.RegisterType((*OldReply)(nil), "my.test.OldReply") + proto.RegisterType((*Communique)(nil), "my.test.Communique") + proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup") + proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta") + proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value) + proto.RegisterEnum("my.test.Days", Days_name, Days_value) + proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value) + proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value) + proto.RegisterExtension(E_ReplyExtensions_Time) + proto.RegisterExtension(E_ReplyExtensions_Carrot) + proto.RegisterExtension(E_ReplyExtensions_Donut) + proto.RegisterExtension(E_Tag) + proto.RegisterExtension(E_Donut) +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto new file mode 100644 index 00000000000..8e7094632de --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto @@ -0,0 +1,156 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +// This package holds interesting messages. +package my.test; // dotted package name + +//import "imp.proto"; +import "multi/multi1.proto"; // unused import + +enum HatType { + // deliberately skipping 0 + FEDORA = 1; + FEZ = 2; +} + +// This enum represents days of the week. +enum Days { + option allow_alias = true; + + MONDAY = 1; + TUESDAY = 2; + LUNDI = 1; // same value as MONDAY +} + +// This is a message that might be sent somewhere. +message Request { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + repeated int64 key = 1; +// optional imp.ImportedMessage imported_message = 2; + optional Color hue = 3; // no default + optional HatType hat = 4 [default=FEDORA]; +// optional imp.ImportedMessage.Owner owner = 6; + optional float deadline = 7 [default=inf]; + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // These foreign types are in imp2.proto, + // which is publicly imported by imp.proto. +// optional imp.PubliclyImportedMessage pub = 10; +// optional imp.PubliclyImportedEnum pub_enum = 13 [default=HAIR]; + + + // This is a map field. It will generate map[int32]string. + map<int32, string> name_mapping = 14; + // This is a map field whose value type is a message. + map<int32, Reply.Entry> msg_mapping = 15; + + optional int32 reset = 12; + // This field should not conflict with any getters. + optional string get_key = 16; +} + +message Reply { + message Entry { + required int64 key_that_needs_1234camel_CasIng = 1; + optional int64 value = 2 [default=7]; + optional int64 _my_field_name_2 = 3; + enum Game { + FOOTBALL = 1; + TENNIS = 2; + } + } + repeated Entry found = 1; + repeated int32 compact_keys = 2 [packed=true]; + extensions 100 to max; +} + +message OtherBase { + optional string name = 1; + extensions 100 to max; +} + +message ReplyExtensions { + extend Reply { + optional double time = 101; + optional ReplyExtensions carrot = 105; + } + extend OtherBase { + optional ReplyExtensions donut = 101; + } +} + +message OtherReplyExtensions { + optional int32 key = 1; +} + +// top-level extension +extend Reply { + optional string tag = 103; + optional OtherReplyExtensions donut = 106; +// optional imp.ImportedMessage elephant = 107; // extend with message from another file. +} + +message OldReply { + // Extensions will be encoded in MessageSet wire format. + option message_set_wire_format = true; + extensions 100 to max; +} + +message Communique { + optional bool make_me_cry = 1; + + // This is a oneof, called "union". 
+ oneof union { + int32 number = 5; + string name = 6; + bytes data = 7; + double temp_c = 8; + float height = 9; + Days today = 10; + bool maybe = 11; + sint32 delta = 12; // name will conflict with Delta below + Reply msg = 13; + group SomeGroup = 14 { + optional string member = 15; + } + } + + message Delta {} +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto new file mode 100644 index 00000000000..869b9af5a7b --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto @@ -0,0 +1,53 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package proto3; + +message Request { + enum Flavour { + SWEET = 0; + SOUR = 1; + UMAMI = 2; + GOPHERLICIOUS = 3; + } + string name = 1; + repeated int64 key = 2; + Flavour taste = 3; + Book book = 4; + repeated int64 unpacked = 5 [packed=false]; +} + +message Book { + string title = 1; + bytes raw_data = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 00000000000..89e07ae192a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,136 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. 
+// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 00000000000..f2c6906b91b --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,155 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/any/any.proto +// DO NOT EDIT! + +/* +Package any is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/any/any.proto + +It has these top-level messages: + Any +*/ +package any + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, + 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, + 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, + 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, + 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, + 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, + 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, + 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, + 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, + 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 00000000000..81dcf46ccfd --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,140 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. 
+ // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any_test.go b/vendor/github.com/golang/protobuf/ptypes/any_test.go new file mode 100644 index 00000000000..ed675b489ca --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any_test.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package ptypes + +import ( + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/golang/protobuf/ptypes/any" +) + +func TestMarshalUnmarshal(t *testing.T) { + orig := &any.Any{Value: []byte("test")} + + packed, err := MarshalAny(orig) + if err != nil { + t.Errorf("MarshalAny(%+v): got: _, %v exp: _, nil", orig, err) + } + + unpacked := &any.Any{} + err = UnmarshalAny(packed, unpacked) + if err != nil || !proto.Equal(unpacked, orig) { + t.Errorf("got: %v, %+v; want nil, %+v", err, unpacked, orig) + } +} + +func TestIs(t *testing.T) { + a, err := MarshalAny(&pb.FileDescriptorProto{}) + if err != nil { + t.Fatal(err) + } + if Is(a, &pb.DescriptorProto{}) { + t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is") + } + if !Is(a, &pb.FileDescriptorProto{}) { + t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not") + } +} + +func TestIsDifferentUrlPrefixes(t *testing.T) { + m := &pb.FileDescriptorProto{} + a := &any.Any{TypeUrl: "foo/bar/" + proto.MessageName(m)} + if !Is(a, m) { + t.Errorf("message with type url %q didn't satisfy Is for type %q", a.TypeUrl, proto.MessageName(m)) + } +} + +func TestUnmarshalDynamic(t *testing.T) { + want := &pb.FileDescriptorProto{Name: proto.String("foo")} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + var got DynamicAny + if err := UnmarshalAny(a, &got); err != nil { + t.Fatal(err) + } + if !proto.Equal(got.Message, want) { + t.Errorf("invalid result from UnmarshalAny, got %q want %q", got.Message, want) + } +} + +func TestEmpty(t *testing.T) { + want := &pb.FileDescriptorProto{} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + got, err := Empty(a) + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, want) { + t.Errorf("unequal empty message, got %q, want %q", got, want) + } + + // that's a valid type_url for a message which shouldn't be linked into this + // test binary. We want an error. + a.TypeUrl = "type.googleapis.com/google.protobuf.FieldMask" + if _, err := Empty(a); err == nil { + t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", a.TypeUrl) + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 00000000000..c0d595da7ab --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 00000000000..65cb0f8eb5f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. 
A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 00000000000..569748346d4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto +// DO NOT EDIT! + +/* +Package duration is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/duration/duration.proto + +It has these top-level messages: + Duration +*/ +package duration + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 189 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, + 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, + 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, + 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, + 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, + 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, + 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13, + 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, + 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 00000000000..96c1796d659 --- /dev/null +++ 
b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,98 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
+// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration_test.go b/vendor/github.com/golang/protobuf/ptypes/duration_test.go new file mode 100644 index 00000000000..e761289f12c --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration_test.go @@ -0,0 +1,121 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +import ( + "math" + "testing" + "time" + + "github.com/golang/protobuf/proto" + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + minGoSeconds = math.MinInt64 / int64(1e9) + maxGoSeconds = math.MaxInt64 / int64(1e9) +) + +var durationTests = []struct { + proto *durpb.Duration + isValid bool + inRange bool + dur time.Duration +}{ + // The zero duration. + {&durpb.Duration{0, 0}, true, true, 0}, + // Some ordinary non-zero durations. 
+ {&durpb.Duration{100, 0}, true, true, 100 * time.Second}, + {&durpb.Duration{-100, 0}, true, true, -100 * time.Second}, + {&durpb.Duration{100, 987}, true, true, 100*time.Second + 987}, + {&durpb.Duration{-100, -987}, true, true, -(100*time.Second + 987)}, + // The largest duration representable in Go. + {&durpb.Duration{maxGoSeconds, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64}, + // The smallest duration representable in Go. + {&durpb.Duration{minGoSeconds, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64}, + {nil, false, false, 0}, + {&durpb.Duration{-100, 987}, false, false, 0}, + {&durpb.Duration{100, -987}, false, false, 0}, + {&durpb.Duration{math.MinInt64, 0}, false, false, 0}, + {&durpb.Duration{math.MaxInt64, 0}, false, false, 0}, + // The largest valid duration. + {&durpb.Duration{maxSeconds, 1e9 - 1}, true, false, 0}, + // The smallest valid duration. + {&durpb.Duration{minSeconds, -(1e9 - 1)}, true, false, 0}, + // The smallest invalid duration above the valid range. + {&durpb.Duration{maxSeconds + 1, 0}, false, false, 0}, + // The largest invalid duration below the valid range. + {&durpb.Duration{minSeconds - 1, -(1e9 - 1)}, false, false, 0}, + // One nanosecond past the largest duration representable in Go. + {&durpb.Duration{maxGoSeconds, int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0}, + // One nanosecond past the smallest duration representable in Go. + {&durpb.Duration{minGoSeconds, int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0}, + // One second past the largest duration representable in Go. + {&durpb.Duration{maxGoSeconds + 1, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0}, + // One second past the smallest duration representable in Go. + {&durpb.Duration{minGoSeconds - 1, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0}, +} + +func TestValidateDuration(t *testing.T) { + for _, test := range durationTests { + err := validateDuration(test.proto) + gotValid := (err == nil) + if gotValid != test.isValid { + t.Errorf("validateDuration(%v) = %t, want %t", test.proto, gotValid, test.isValid) + } + } +} + +func TestDuration(t *testing.T) { + for _, test := range durationTests { + got, err := Duration(test.proto) + gotOK := (err == nil) + wantOK := test.isValid && test.inRange + if gotOK != wantOK { + t.Errorf("Duration(%v) ok = %t, want %t", test.proto, gotOK, wantOK) + } + if err == nil && got != test.dur { + t.Errorf("Duration(%v) = %v, want %v", test.proto, got, test.dur) + } + } +} + +func TestDurationProto(t *testing.T) { + for _, test := range durationTests { + if test.isValid && test.inRange { + got := DurationProto(test.dur) + if !proto.Equal(got, test.proto) { + t.Errorf("DurationProto(%v) = %v, want %v", test.dur, got, test.proto) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 00000000000..46c765a96eb --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,69 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/empty/empty.proto +// DO NOT EDIT! + +/* +Package empty is a generated protocol buffer package. 
+ +It is generated from these files: + github.com/golang/protobuf/ptypes/empty/empty.proto + +It has these top-level messages: + Empty +*/ +package empty + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/empty/empty.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 150 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcd, + 0x2d, 0x28, 0xa9, 0x84, 0x90, 0x7a, 0x60, 0x39, 0x21, 0xfe, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, + 0x3d, 0x98, 0x4a, 0x25, 0x76, 0x2e, 0x56, 0x57, 0x90, 0xbc, 0x53, 0x25, 0x97, 0x70, 0x72, 0x7e, + 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x27, 0xd2, + 0xce, 0x05, 0x8c, 0x8c, 0x3f, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, + 0x73, 0x87, 0x18, 0x1a, 0x00, 0x55, 0xaa, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, + 0x17, 0x02, 0xd2, 0x92, 0xc4, 0x06, 0x36, 0xc3, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xbb, + 0xf4, 0x0e, 0xd2, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto new file mode 100644 index 00000000000..37f4cd10eee --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto @@ -0,0 +1,53 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/empty"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh new file mode 100755 index 00000000000..2a5b4e8bdcc --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh @@ -0,0 +1,66 @@ +#!/bin/bash -e +# +# This script fetches and rebuilds the "well-known types" protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. +# You also need Go and Git installed. + +PKG=github.com/golang/protobuf/ptypes +UPSTREAM=https://github.com/google/protobuf +UPSTREAM_SUBDIR=src/google/protobuf +PROTO_FILES=' + any.proto + duration.proto + empty.proto + struct.proto + timestamp.proto + wrappers.proto +' + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go git protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) +trap 'rm -rf $tmpdir' EXIT + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd $base + +echo 1>&2 "fetching latest protos... " +git clone -q $UPSTREAM $tmpdir +# Pass 1: build mapping from upstream filename to our filename. +declare -A filename_map +for f in $(cd $PKG && find * -name '*.proto'); do + echo -n 1>&2 "looking for latest version of $f... 
" + up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/) + echo 1>&2 $up + if [ $(echo $up | wc -w) != "1" ]; then + die "not exactly one match" + fi + filename_map[$up]=$f +done +# Pass 2: copy files +for up in "${!filename_map[@]}"; do + f=${filename_map[$up]} + shortname=$(basename $f | sed 's,\.proto$,,') + cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f +done + +# Run protoc once per package. +for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do + echo 1>&2 "* $dir" + protoc --go_out=. $dir/*.proto +done +echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 00000000000..197042ed527 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,382 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/struct/struct.proto +// DO NOT EDIT! + +/* +Package structpb is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/struct/struct.proto + +It has these top-level messages: + Struct + Value + ListValue +*/ +package structpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} +func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. 
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. + // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*Value) XXX_WellKnownType() string { return "Value" } + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto 
package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/struct/struct.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 416 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0x80, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa0, 0xa1, 0x7b, 0x09, + 0x22, 0x09, 0x56, 0x04, 0x31, 0x5e, 0x0c, 0xac, 0xbb, 0x60, 0x58, 0x62, 0x74, 0x57, 0xf0, 0x52, + 0x9a, 0x34, 0x8d, 0xa1, 0xd3, 0x99, 0x90, 0xcc, 0x28, 0x3d, 0xfa, 0x2f, 0x3c, 0x8a, 0x47, 0x8f, + 0xfe, 0x42, 0x99, 0x99, 0x24, 0x4a, 0x4b, 0xc1, 0xd3, 0xf4, 0xbd, 0xf9, 0xde, 0x37, 0xef, 0xbd, + 0x06, 0x9e, 0x97, 0x15, 0xff, 0x2c, 0x32, 0x3f, 0x67, 0x9b, 0xa0, 0x64, 0x64, 0x41, 0xcb, 0xa0, + 0x6e, 0x18, 0x67, 0x99, 0x58, 0x05, 0x35, 0xdf, 0xd6, 0x45, 0x1b, 0xb4, 0xbc, 0x11, 0x39, 0xef, + 0x0e, 0x5f, 0xdd, 0xe2, 0x3b, 0x25, 0x63, 0x25, 0x29, 0xfc, 0x9e, 0x9d, 0x7e, 0x47, 0x60, 0xbd, + 0x57, 0x04, 0x0e, 0xc1, 0x5a, 0x55, 0x05, 0x59, 0xb6, 0x13, 0xe4, 0x9a, 0x9e, 0x33, 0x3b, 0xf3, + 0x77, 0x60, 0x5f, 0x83, 0xfe, 0x1b, 0x45, 0x9d, 0x53, 0xde, 0x6c, 0xd3, 0xae, 0xe4, 0xf4, 0x1d, + 0x38, 0xff, 0xa4, 0xf1, 0x09, 0x98, 0xeb, 0x62, 0x3b, 0x41, 0x2e, 0xf2, 0xec, 0x54, 0xfe, 0xc4, + 0x4f, 0x60, 0xfc, 0x65, 0x41, 0x44, 0x31, 0x31, 0x5c, 0xe4, 0x39, 0xb3, 0x7b, 0x7b, 0xf2, 0x1b, + 0x79, 0x9b, 0x6a, 0xe8, 0xa5, 0xf1, 0x02, 0x4d, 0x7f, 0x1b, 0x30, 0x56, 0x49, 0x1c, 0x02, 0x50, + 0x41, 0xc8, 0x5c, 0x0b, 0xa4, 0xf4, 0x78, 0x76, 0xba, 0x27, 0xb8, 0x12, 0x84, 0x28, 0xfe, 0x72, + 0x94, 0xda, 0xb4, 0x0f, 0xf0, 0x19, 0xdc, 0xa6, 0x62, 0x93, 0x15, 0xcd, 0xfc, 0xef, 0xfb, 0xe8, + 0x72, 0x94, 0x3a, 0x3a, 0x3b, 0x40, 0x2d, 0x6f, 0x2a, 0x5a, 0x76, 0x90, 0x29, 0x1b, 0x97, 0x90, + 0xce, 0x6a, 0xe8, 0x11, 0x40, 0xc6, 0x58, 0xdf, 0xc6, 0x91, 0x8b, 0xbc, 0x5b, 0xf2, 0x29, 0x99, + 0xd3, 0xc0, 0x2b, 0x65, 0x11, 0x39, 0xef, 0x90, 0xb1, 0x1a, 0xf5, 0xfe, 0x81, 0x3d, 0x76, 0x7a, + 0x91, 0xf3, 0x61, 0x4a, 0x52, 0xb5, 0x7d, 0xad, 0xa5, 0x6a, 0xf7, 0xa7, 0x8c, 0xab, 0x96, 0x0f, + 0x53, 0x92, 0x3e, 0x88, 0x2c, 0x38, 0x5a, 0x57, 0x74, 0x39, 0x0d, 0xc1, 0x1e, 0x08, 0xec, 0x83, + 0xa5, 0x64, 0xfd, 0x3f, 0x7a, 0x68, 0xe9, 0x1d, 0xf5, 0xf8, 0x01, 0xd8, 0xc3, 0x12, 0xf1, 0x31, + 0xc0, 0xd5, 
0x75, 0x1c, 0xcf, 0x6f, 0x5e, 0xc7, 0xd7, 0xe7, 0x27, 0xa3, 0xe8, 0x1b, 0x82, 0xbb, + 0x39, 0xdb, 0xec, 0x2a, 0x22, 0x47, 0x4f, 0x93, 0xc8, 0x38, 0x41, 0x9f, 0x9e, 0xfe, 0xef, 0x87, + 0x19, 0xea, 0xa3, 0xce, 0x7e, 0x20, 0xf4, 0xd3, 0x30, 0x2f, 0x92, 0xe8, 0x97, 0xf1, 0xf0, 0x42, + 0xcb, 0x93, 0xbe, 0xbf, 0x8f, 0x05, 0x21, 0x6f, 0x29, 0xfb, 0x4a, 0x3f, 0xc8, 0xca, 0xcc, 0x52, + 0xaa, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xbc, 0xcf, 0x6d, 0x50, 0xfe, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto new file mode 100644 index 00000000000..beeba811888 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map<string, Value> fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values.
A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 00000000000..1b365762202 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,125 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). 
+ maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 00000000000..ffcc51594ac --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,127 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +// DO NOT EDIT! + +/* +Package timestamp is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +It has these top-level messages: + Timestamp +*/ +package timestamp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 194 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, + 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, + 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, + 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, + 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, + 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, + 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, + 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, + 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, + 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 00000000000..7992a85886f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,111 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go new file mode 100644 index 00000000000..114a7f9fcfc --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go @@ -0,0 +1,138 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +import ( + "math" + "testing" + "time" + + "github.com/golang/protobuf/proto" + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +var tests = []struct { + ts *tspb.Timestamp + valid bool + t time.Time +}{ + // The timestamp representing the Unix epoch date. + {&tspb.Timestamp{0, 0}, true, utcDate(1970, 1, 1)}, + // The smallest representable timestamp. + {&tspb.Timestamp{math.MinInt64, math.MinInt32}, false, + time.Unix(math.MinInt64, math.MinInt32).UTC()}, + // The smallest representable timestamp with non-negative nanos. + {&tspb.Timestamp{math.MinInt64, 0}, false, time.Unix(math.MinInt64, 0).UTC()}, + // The earliest valid timestamp. + {&tspb.Timestamp{minValidSeconds, 0}, true, utcDate(1, 1, 1)}, + //"0001-01-01T00:00:00Z"}, + // The largest representable timestamp. + {&tspb.Timestamp{math.MaxInt64, math.MaxInt32}, false, + time.Unix(math.MaxInt64, math.MaxInt32).UTC()}, + // The largest representable timestamp with nanos in range. + {&tspb.Timestamp{math.MaxInt64, 1e9 - 1}, false, + time.Unix(math.MaxInt64, 1e9-1).UTC()}, + // The largest valid timestamp. + {&tspb.Timestamp{maxValidSeconds - 1, 1e9 - 1}, true, + time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)}, + // The smallest invalid timestamp that is larger than the valid range. + {&tspb.Timestamp{maxValidSeconds, 0}, false, time.Unix(maxValidSeconds, 0).UTC()}, + // A date before the epoch. + {&tspb.Timestamp{-281836800, 0}, true, utcDate(1961, 1, 26)}, + // A date after the epoch. 
+ {&tspb.Timestamp{1296000000, 0}, true, utcDate(2011, 1, 26)}, + // A date after the epoch, in the middle of the day. + {&tspb.Timestamp{1296012345, 940483}, true, + time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)}, +} + +func TestValidateTimestamp(t *testing.T) { + for _, s := range tests { + got := validateTimestamp(s.ts) + if (got == nil) != s.valid { + t.Errorf("validateTimestamp(%v) = %v, want %v", s.ts, got, s.valid) + } + } +} + +func TestTimestamp(t *testing.T) { + for _, s := range tests { + got, err := Timestamp(s.ts) + if (err == nil) != s.valid { + t.Errorf("Timestamp(%v) error = %v, but valid = %t", s.ts, err, s.valid) + } else if s.valid && got != s.t { + t.Errorf("Timestamp(%v) = %v, want %v", s.ts, got, s.t) + } + } + // Special case: a nil Timestamp is an error, but returns the 0 Unix time. + got, err := Timestamp(nil) + want := time.Unix(0, 0).UTC() + if got != want { + t.Errorf("Timestamp(nil) = %v, want %v", got, want) + } + if err == nil { + t.Errorf("Timestamp(nil) error = nil, expected error") + } +} + +func TestTimestampProto(t *testing.T) { + for _, s := range tests { + got, err := TimestampProto(s.t) + if (err == nil) != s.valid { + t.Errorf("TimestampProto(%v) error = %v, but valid = %t", s.t, err, s.valid) + } else if s.valid && !proto.Equal(got, s.ts) { + t.Errorf("TimestampProto(%v) = %v, want %v", s.t, got, s.ts) + } + } + // No corresponding special case here: no time.Time results in a nil Timestamp. +} + +func TestTimestampString(t *testing.T) { + for _, test := range []struct { + ts *tspb.Timestamp + want string + }{ + // Not much testing needed because presumably time.Format is + // well-tested. + {&tspb.Timestamp{0, 0}, "1970-01-01T00:00:00Z"}, + {&tspb.Timestamp{minValidSeconds - 1, 0}, "(timestamp: seconds:-62135596801 before 0001-01-01)"}, + } { + got := TimestampString(test.ts) + if got != test.want { + t.Errorf("TimestampString(%v) = %q, want %q", test.ts, got, test.want) + } + } +} + +func utcDate(year, month, day int) time.Time { + return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 00000000000..5e52a81c7b8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto +// DO NOT EDIT! + +/* +Package wrappers is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/wrappers/wrappers.proto + +It has these top-level messages: + DoubleValue + FloatValue + Int64Value + UInt64Value + Int32Value + UInt32Value + BoolValue + StringValue + BytesValue +*/ +package wrappers + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
+ Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/wrappers/wrappers.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 260 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x2f, + 0x4a, 0x2c, 0x28, 0x48, 0x2d, 0x42, 0x30, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0xd3, + 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, 0x52, 0xc3, + 0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, 0x46, 0x0d, + 0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 0x27, 0x3f, 0xb1, 0x04, 0x8b, 0x1a, 0x26, + 0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x28, 0x73, 0x71, 0x87, + 0xe2, 0x52, 0xc4, 0x82, 0x6a, 0x90, 0xb1, 0x11, 0x16, 0x35, 0xac, 0x68, 0x06, 0x61, 0x55, 0xc4, + 0x0b, 0x53, 0xa4, 0xc8, 0xc5, 0xe9, 0x94, 0x9f, 0x9f, 0x83, 0x45, 0x09, 0x07, 0x92, 0x39, 0xc1, + 0x25, 0x45, 0x99, 0x79, 0xe9, 0x58, 0x14, 0x71, 0x22, 0x39, 0xc8, 0xa9, 0xb2, 0x24, 0xb5, 0x18, + 0x8b, 0x1a, 0x1e, 0xa8, 0x1a, 0xa7, 0x7a, 0x2e, 0xe1, 0xe4, 0xfc, 0x5c, 0x3d, 0xb4, 0xd0, 0x75, + 
0xe2, 0x0d, 0x87, 0x06, 0x7f, 0x00, 0x48, 0x24, 0x80, 0x31, 0x4a, 0x8b, 0xf8, 0xa8, 0x5b, 0xc0, + 0xc8, 0xf8, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, + 0xd1, 0x01, 0x50, 0xd5, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, + 0x5d, 0x49, 0x6c, 0x60, 0x63, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xdf, 0x64, 0x4b, + 0x1c, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto new file mode 100644 index 00000000000..4828ad9ae93 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto @@ -0,0 +1,119 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/wrappers"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. 
+message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/golang.org/x/net/.gitattributes b/vendor/golang.org/x/net/.gitattributes new file mode 100644 index 00000000000..d2f212e5da8 --- /dev/null +++ b/vendor/golang.org/x/net/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/net/.gitignore b/vendor/golang.org/x/net/.gitignore new file mode 100644 index 00000000000..8339fd61d3f --- /dev/null +++ b/vendor/golang.org/x/net/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 00000000000..15167cd746c --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md new file mode 100644 index 00000000000..88dff59bc7d --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). 
+ +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 00000000000..1c4577e9680 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 00000000000..733099041f8 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/README b/vendor/golang.org/x/net/README new file mode 100644 index 00000000000..6b13d8e5050 --- /dev/null +++ b/vendor/golang.org/x/net/README @@ -0,0 +1,3 @@ +This repository holds supplementary Go networking libraries. + +To submit changes to this repository, see http://golang.org/doc/contribute.html. diff --git a/vendor/golang.org/x/net/bpf/asm.go b/vendor/golang.org/x/net/bpf/asm.go new file mode 100644 index 00000000000..15e21b18122 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/asm.go @@ -0,0 +1,41 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// Assemble converts insts into raw instructions suitable for loading +// into a BPF virtual machine. +// +// Currently, no optimization is attempted, the assembled program flow +// is exactly as provided. +func Assemble(insts []Instruction) ([]RawInstruction, error) { + ret := make([]RawInstruction, len(insts)) + var err error + for i, inst := range insts { + ret[i], err = inst.Assemble() + if err != nil { + return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err) + } + } + return ret, nil +} + +// Disassemble attempts to parse raw back into +// Instructions. Unrecognized RawInstructions are assumed to be an +// extension not implemented by this package, and are passed through +// unchanged to the output. The allDecoded value reports whether insts +// contains no RawInstructions. +func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) { + insts = make([]Instruction, len(raw)) + allDecoded = true + for i, r := range raw { + insts[i] = r.Disassemble() + if _, ok := insts[i].(RawInstruction); ok { + allDecoded = false + } + } + return insts, allDecoded +} diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go new file mode 100644 index 00000000000..2c8bbab7f7f --- /dev/null +++ b/vendor/golang.org/x/net/bpf/constants.go @@ -0,0 +1,215 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Register is a register of the BPF virtual machine. +type Register uint16 + +const ( + // RegA is the accumulator register. RegA is always the + // destination register of ALU operations. + RegA Register = iota + // RegX is the indirection register, used by LoadIndirect + // operations. + RegX +) + +// An ALUOp is an arithmetic or logic operation. +type ALUOp uint16 + +// ALU binary operation types. +const ( + ALUOpAdd ALUOp = iota << 4 + ALUOpSub + ALUOpMul + ALUOpDiv + ALUOpOr + ALUOpAnd + ALUOpShiftLeft + ALUOpShiftRight + aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type. + ALUOpMod + ALUOpXor +) + +// A JumpTest is a comparison operator used in conditional jumps. 
+type JumpTest uint16 + +// Supported operators for conditional jumps. +const ( + // K == A + JumpEqual JumpTest = iota + // K != A + JumpNotEqual + // K > A + JumpGreaterThan + // K < A + JumpLessThan + // K >= A + JumpGreaterOrEqual + // K <= A + JumpLessOrEqual + // K & A != 0 + JumpBitsSet + // K & A == 0 + JumpBitsNotSet +) + +// An Extension is a function call provided by the kernel that +// performs advanced operations that are expensive or impossible +// within the BPF virtual machine. +// +// Extensions are only implemented by the Linux kernel. +// +// TODO: should we prune this list? Some of these extensions seem +// either broken or near-impossible to use correctly, whereas other +// (len, random, ifindex) are quite useful. +type Extension int + +// Extension functions available in the Linux kernel. +const ( + // ExtLen returns the length of the packet. + ExtLen Extension = 1 + // ExtProto returns the packet's L3 protocol type. + ExtProto = 0 + // ExtType returns the packet's type (skb->pkt_type in the kernel) + // + // TODO: better documentation. How nice an API do we want to + // provide for these esoteric extensions? + ExtType = 4 + // ExtPayloadOffset returns the offset of the packet payload, or + // the first protocol header that the kernel does not know how to + // parse. + ExtPayloadOffset = 52 + // ExtInterfaceIndex returns the index of the interface on which + // the packet was received. + ExtInterfaceIndex = 8 + // ExtNetlinkAttr returns the netlink attribute of type X at + // offset A. + ExtNetlinkAttr = 12 + // ExtNetlinkAttrNested returns the nested netlink attribute of + // type X at offset A. + ExtNetlinkAttrNested = 16 + // ExtMark returns the packet's mark value. + ExtMark = 20 + // ExtQueue returns the packet's assigned hardware queue. + ExtQueue = 24 + // ExtLinkLayerType returns the packet's hardware address type + // (e.g. Ethernet, Infiniband). + ExtLinkLayerType = 28 + // ExtRXHash returns the packets receive hash. + // + // TODO: figure out what this rxhash actually is. + ExtRXHash = 32 + // ExtCPUID returns the ID of the CPU processing the current + // packet. + ExtCPUID = 36 + // ExtVLANTag returns the packet's VLAN tag. + ExtVLANTag = 44 + // ExtVLANTagPresent returns non-zero if the packet has a VLAN + // tag. + // + // TODO: I think this might be a lie: it reads bit 0x1000 of the + // VLAN header, which changed meaning in recent revisions of the + // spec - this extension may now return meaningless information. + ExtVLANTagPresent = 48 + // ExtVLANProto returns 0x8100 if the frame has a VLAN header, + // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some + // other value if no VLAN information is present. + ExtVLANProto = 60 + // ExtRand returns a uniformly random uint32. + ExtRand = 56 +) + +// The following gives names to various bit patterns used in opcode construction. 
+ +const ( + opMaskCls uint16 = 0x7 + // opClsLoad masks + opMaskLoadDest = 0x01 + opMaskLoadWidth = 0x18 + opMaskLoadMode = 0xe0 + // opClsALU + opMaskOperandSrc = 0x08 + opMaskOperator = 0xf0 + // opClsJump + opMaskJumpConst = 0x0f + opMaskJumpCond = 0xf0 +) + +const ( + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsLoadA uint16 = iota + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 | + // +---------------+-----------------+---+---+---+ + opClsLoadX + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | + // +---+---+---+---+---+---+---+---+ + opClsStoreA + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | + // +---+---+---+---+---+---+---+---+ + opClsStoreX + // +---------------+-----------------+---+---+---+ + // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsALU + // +-----------------------------+---+---+---+---+ + // | TestOperator (4b) | 0 | 1 | 0 | 1 | + // +-----------------------------+---+---+---+---+ + opClsJump + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 | + // +---+-------------------------+---+---+---+---+ + opClsReturn + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 | + // +---+-------------------------+---+---+---+---+ + opClsMisc +) + +const ( + opAddrModeImmediate uint16 = iota << 5 + opAddrModeAbsolute + opAddrModeIndirect + opAddrModeScratch + opAddrModePacketLen // actually an extension, not an addressing mode. + opAddrModeMemShift +) + +const ( + opLoadWidth4 uint16 = iota << 3 + opLoadWidth2 + opLoadWidth1 +) + +// Operator defined by ALUOp* + +const ( + opALUSrcConstant uint16 = iota << 3 + opALUSrcX +) + +const ( + opJumpAlways = iota << 4 + opJumpEqual + opJumpGT + opJumpGE + opJumpSet +) + +const ( + opRetSrcConstant uint16 = iota << 4 + opRetSrcA +) + +const ( + opMiscTAX = 0x00 + opMiscTXA = 0x80 +) diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go new file mode 100644 index 00000000000..ae62feb5341 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/doc.go @@ -0,0 +1,82 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Package bpf implements marshaling and unmarshaling of programs for the +Berkeley Packet Filter virtual machine, and provides a Go implementation +of the virtual machine. + +BPF's main use is to specify a packet filter for network taps, so that +the kernel doesn't have to expensively copy every packet it sees to +userspace. However, it's been repurposed to other areas where running +user code in-kernel is needed. For example, Linux's seccomp uses BPF +to apply security policies to system calls. For simplicity, this +documentation refers only to packets, but other uses of BPF have their +own data payloads. + +BPF programs run in a restricted virtual machine. It has almost no +access to kernel functions, and while conditional branches are +allowed, they can only jump forwards, to guarantee that there are no +infinite loops. + +The virtual machine + +The BPF VM is an accumulator machine. Its main register, called +register A, is an implicit source and destination in all arithmetic +and logic operations. 
The machine also has 16 scratch registers for +temporary storage, and an indirection register (register X) for +indirect memory access. All registers are 32 bits wide. + +Each run of a BPF program is given one packet, which is placed in the +VM's read-only "main memory". LoadAbsolute and LoadIndirect +instructions can fetch up to 32 bits at a time into register A for +examination. + +The goal of a BPF program is to produce and return a verdict (uint32), +which tells the kernel what to do with the packet. In the context of +packet filtering, the returned value is the number of bytes of the +packet to forward to userspace, or 0 to ignore the packet. Other +contexts like seccomp define their own return values. + +In order to simplify programs, attempts to read past the end of the +packet terminate the program execution with a verdict of 0 (ignore +packet). This means that the vast majority of BPF programs don't need +to do any explicit bounds checking. + +In addition to the bytes of the packet, some BPF programs have access +to extensions, which are essentially calls to kernel utility +functions. Currently, the only extensions supported by this package +are the Linux packet filter extensions. + +Examples + +This packet filter selects all ARP packets. + + bpf.Assemble([]bpf.Instruction{ + // Load "EtherType" field from the ethernet header. + bpf.LoadAbsolute{Off: 12, Size: 2}, + // Skip over the next instruction if EtherType is not ARP. + bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, + // Verdict is "send up to 4k of the packet to userspace." + bpf.RetConstant{Val: 4096}, + // Verdict is "ignore packet." + bpf.RetConstant{Val: 0}, + }) + +This packet filter captures a random 1% sample of traffic. + + bpf.Assemble([]bpf.Instruction{ + // Get a 32-bit random number from the Linux kernel. + bpf.LoadExtension{Num: bpf.ExtRand}, + // 1% dice roll? + bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1}, + // Capture. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + +*/ +package bpf // import "golang.org/x/net/bpf" diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go new file mode 100644 index 00000000000..68ae6f549a2 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/instructions.go @@ -0,0 +1,434 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// An Instruction is one instruction executed by the BPF virtual +// machine. +type Instruction interface { + // Assemble assembles the Instruction into a RawInstruction. + Assemble() (RawInstruction, error) +} + +// A RawInstruction is a raw BPF virtual machine instruction. +type RawInstruction struct { + // Operation to execute. + Op uint16 + // For conditional jump instructions, the number of instructions + // to skip if the condition is true/false. + Jt uint8 + Jf uint8 + // Constant parameter. The meaning depends on the Op. + K uint32 +} + +// Assemble implements the Instruction Assemble method. +func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil } + +// Disassemble parses ri into an Instruction and returns it. If ri is +// not recognized by this package, ri itself is returned. 
+func (ri RawInstruction) Disassemble() Instruction { + switch ri.Op & opMaskCls { + case opClsLoadA, opClsLoadX: + reg := Register(ri.Op & opMaskLoadDest) + sz := 0 + switch ri.Op & opMaskLoadWidth { + case opLoadWidth4: + sz = 4 + case opLoadWidth2: + sz = 2 + case opLoadWidth1: + sz = 1 + default: + return ri + } + switch ri.Op & opMaskLoadMode { + case opAddrModeImmediate: + if sz != 4 { + return ri + } + return LoadConstant{Dst: reg, Val: ri.K} + case opAddrModeScratch: + if sz != 4 || ri.K > 15 { + return ri + } + return LoadScratch{Dst: reg, N: int(ri.K)} + case opAddrModeAbsolute: + return LoadAbsolute{Size: sz, Off: ri.K} + case opAddrModeIndirect: + return LoadIndirect{Size: sz, Off: ri.K} + case opAddrModePacketLen: + if sz != 4 { + return ri + } + return LoadExtension{Num: ExtLen} + case opAddrModeMemShift: + return LoadMemShift{Off: ri.K} + default: + return ri + } + + case opClsStoreA: + if ri.Op != opClsStoreA || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegA, N: int(ri.K)} + + case opClsStoreX: + if ri.Op != opClsStoreX || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegX, N: int(ri.K)} + + case opClsALU: + switch op := ALUOp(ri.Op & opMaskOperator); op { + case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: + if ri.Op&opMaskOperandSrc != 0 { + return ALUOpX{Op: op} + } + return ALUOpConstant{Op: op, Val: ri.K} + case aluOpNeg: + return NegateA{} + default: + return ri + } + + case opClsJump: + if ri.Op&opMaskJumpConst != opClsJump { + return ri + } + switch ri.Op & opMaskJumpCond { + case opJumpAlways: + return Jump{Skip: ri.K} + case opJumpEqual: + return JumpIf{ + Cond: JumpEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGT: + return JumpIf{ + Cond: JumpGreaterThan, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGE: + return JumpIf{ + Cond: JumpGreaterOrEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpSet: + return JumpIf{ + Cond: JumpBitsSet, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + default: + return ri + } + + case opClsReturn: + switch ri.Op { + case opClsReturn | opRetSrcA: + return RetA{} + case opClsReturn | opRetSrcConstant: + return RetConstant{Val: ri.K} + default: + return ri + } + + case opClsMisc: + switch ri.Op { + case opClsMisc | opMiscTAX: + return TAX{} + case opClsMisc | opMiscTXA: + return TXA{} + default: + return ri + } + + default: + panic("unreachable") // switch is exhaustive on the bit pattern + } +} + +// LoadConstant loads Val into register Dst. +type LoadConstant struct { + Dst Register + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadConstant) Assemble() (RawInstruction, error) { + return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) +} + +// LoadScratch loads scratch[N] into register Dst. +type LoadScratch struct { + Dst Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) +} + +// LoadAbsolute loads packet[Off:Off+Size] as an integer value into +// register A. +type LoadAbsolute struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. 
+func (a LoadAbsolute) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) +} + +// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value +// into register A. +type LoadIndirect struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadIndirect) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) +} + +// LoadMemShift multiplies the first 4 bits of the byte at packet[Off] +// by 4 and stores the result in register X. +// +// This instruction is mainly useful to load into X the length of an +// IPv4 packet header in a single instruction, rather than have to do +// the arithmetic on the header's first byte by hand. +type LoadMemShift struct { + Off uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadMemShift) Assemble() (RawInstruction, error) { + return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) +} + +// LoadExtension invokes a linux-specific extension and stores the +// result in register A. +type LoadExtension struct { + Num Extension +} + +// Assemble implements the Instruction Assemble method. +func (a LoadExtension) Assemble() (RawInstruction, error) { + if a.Num == ExtLen { + return assembleLoad(RegA, 4, opAddrModePacketLen, 0) + } + return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(-0x1000+a.Num)) +} + +// StoreScratch stores register Src into scratch[N]. +type StoreScratch struct { + Src Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a StoreScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + var op uint16 + switch a.Src { + case RegA: + op = opClsStoreA + case RegX: + op = opClsStoreX + default: + return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src) + } + + return RawInstruction{ + Op: op, + K: uint32(a.N), + }, nil +} + +// ALUOpConstant executes A = A Val. +type ALUOpConstant struct { + Op ALUOp + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | opALUSrcConstant | uint16(a.Op), + K: a.Val, + }, nil +} + +// ALUOpX executes A = A X +type ALUOpX struct { + Op ALUOp +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | opALUSrcX | uint16(a.Op), + }, nil +} + +// NegateA executes A = -A. +type NegateA struct{} + +// Assemble implements the Instruction Assemble method. +func (a NegateA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(aluOpNeg), + }, nil +} + +// Jump skips the following Skip instructions in the program. +type Jump struct { + Skip uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a Jump) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsJump | opJumpAlways, + K: a.Skip, + }, nil +} + +// JumpIf skips the following Skip instructions in the program if A +// Val is true. +type JumpIf struct { + Cond JumpTest + Val uint32 + SkipTrue uint8 + SkipFalse uint8 +} + +// Assemble implements the Instruction Assemble method. 
+func (a JumpIf) Assemble() (RawInstruction, error) { + var ( + cond uint16 + flip bool + ) + switch a.Cond { + case JumpEqual: + cond = opJumpEqual + case JumpNotEqual: + cond, flip = opJumpEqual, true + case JumpGreaterThan: + cond = opJumpGT + case JumpLessThan: + cond, flip = opJumpGE, true + case JumpGreaterOrEqual: + cond = opJumpGE + case JumpLessOrEqual: + cond, flip = opJumpGT, true + case JumpBitsSet: + cond = opJumpSet + case JumpBitsNotSet: + cond, flip = opJumpSet, true + default: + return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond) + } + jt, jf := a.SkipTrue, a.SkipFalse + if flip { + jt, jf = jf, jt + } + return RawInstruction{ + Op: opClsJump | cond, + Jt: jt, + Jf: jf, + K: a.Val, + }, nil +} + +// RetA exits the BPF program, returning the value of register A. +type RetA struct{} + +// Assemble implements the Instruction Assemble method. +func (a RetA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcA, + }, nil +} + +// RetConstant exits the BPF program, returning a constant value. +type RetConstant struct { + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a RetConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcConstant, + K: a.Val, + }, nil +} + +// TXA copies the value of register X to register A. +type TXA struct{} + +// Assemble implements the Instruction Assemble method. +func (a TXA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTXA, + }, nil +} + +// TAX copies the value of register A to register X. +type TAX struct{} + +// Assemble implements the Instruction Assemble method. +func (a TAX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTAX, + }, nil +} + +func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) { + var ( + cls uint16 + sz uint16 + ) + switch dst { + case RegA: + cls = opClsLoadA + case RegX: + cls = opClsLoadX + default: + return RawInstruction{}, fmt.Errorf("invalid target register %v", dst) + } + switch loadSize { + case 1: + sz = opLoadWidth1 + case 2: + sz = opLoadWidth2 + case 4: + sz = opLoadWidth4 + default: + return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz) + } + return RawInstruction{ + Op: cls | sz | mode, + K: k, + }, nil +} diff --git a/vendor/golang.org/x/net/bpf/instructions_test.go b/vendor/golang.org/x/net/bpf/instructions_test.go new file mode 100644 index 00000000000..833d1e17511 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/instructions_test.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import ( + "io/ioutil" + "reflect" + "strconv" + "strings" + "testing" +) + +// This is a direct translation of the program in +// testdata/all_instructions.txt. 
+var allInstructions = []Instruction{ + LoadConstant{Dst: RegA, Val: 42}, + LoadConstant{Dst: RegX, Val: 42}, + + LoadScratch{Dst: RegA, N: 3}, + LoadScratch{Dst: RegX, N: 3}, + + LoadAbsolute{Off: 42, Size: 1}, + LoadAbsolute{Off: 42, Size: 2}, + LoadAbsolute{Off: 42, Size: 4}, + + LoadIndirect{Off: 42, Size: 1}, + LoadIndirect{Off: 42, Size: 2}, + LoadIndirect{Off: 42, Size: 4}, + + LoadMemShift{Off: 42}, + + LoadExtension{Num: ExtLen}, + LoadExtension{Num: ExtProto}, + LoadExtension{Num: ExtType}, + LoadExtension{Num: ExtRand}, + + StoreScratch{Src: RegA, N: 3}, + StoreScratch{Src: RegX, N: 3}, + + ALUOpConstant{Op: ALUOpAdd, Val: 42}, + ALUOpConstant{Op: ALUOpSub, Val: 42}, + ALUOpConstant{Op: ALUOpMul, Val: 42}, + ALUOpConstant{Op: ALUOpDiv, Val: 42}, + ALUOpConstant{Op: ALUOpOr, Val: 42}, + ALUOpConstant{Op: ALUOpAnd, Val: 42}, + ALUOpConstant{Op: ALUOpShiftLeft, Val: 42}, + ALUOpConstant{Op: ALUOpShiftRight, Val: 42}, + ALUOpConstant{Op: ALUOpMod, Val: 42}, + ALUOpConstant{Op: ALUOpXor, Val: 42}, + + ALUOpX{Op: ALUOpAdd}, + ALUOpX{Op: ALUOpSub}, + ALUOpX{Op: ALUOpMul}, + ALUOpX{Op: ALUOpDiv}, + ALUOpX{Op: ALUOpOr}, + ALUOpX{Op: ALUOpAnd}, + ALUOpX{Op: ALUOpShiftLeft}, + ALUOpX{Op: ALUOpShiftRight}, + ALUOpX{Op: ALUOpMod}, + ALUOpX{Op: ALUOpXor}, + + NegateA{}, + + Jump{Skip: 10}, + JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9}, + JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8}, + JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7}, + JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6}, + JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5}, + JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4}, + JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, + + TAX{}, + TXA{}, + + RetA{}, + RetConstant{Val: 42}, +} +var allInstructionsExpected = "testdata/all_instructions.bpf" + +// Check that we produce the same output as the canonical bpf_asm +// linux kernel tool. +func TestInterop(t *testing.T) { + out, err := Assemble(allInstructions) + if err != nil { + t.Fatalf("assembly of allInstructions program failed: %s", err) + } + t.Logf("Assembled program is %d instructions long", len(out)) + + bs, err := ioutil.ReadFile(allInstructionsExpected) + if err != nil { + t.Fatalf("reading %s: %s", allInstructionsExpected, err) + } + // First statement is the number of statements, last statement is + // empty. We just ignore both and rely on slice length. 
+ stmts := strings.Split(string(bs), ",") + if len(stmts)-2 != len(out) { + t.Fatalf("test program lengths don't match: %s has %d, Go implementation has %d", allInstructionsExpected, len(stmts)-2, len(allInstructions)) + } + + for i, stmt := range stmts[1 : len(stmts)-2] { + nums := strings.Split(stmt, " ") + if len(nums) != 4 { + t.Fatalf("malformed instruction %d in %s: %s", i+1, allInstructionsExpected, stmt) + } + + actual := out[i] + + op, err := strconv.ParseUint(nums[0], 10, 16) + if err != nil { + t.Fatalf("malformed opcode %s in instruction %d of %s", nums[0], i+1, allInstructionsExpected) + } + if actual.Op != uint16(op) { + t.Errorf("opcode mismatch on instruction %d (%#v): got 0x%02x, want 0x%02x", i+1, allInstructions[i], actual.Op, op) + } + + jt, err := strconv.ParseUint(nums[1], 10, 8) + if err != nil { + t.Fatalf("malformed jt offset %s in instruction %d of %s", nums[1], i+1, allInstructionsExpected) + } + if actual.Jt != uint8(jt) { + t.Errorf("jt mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jt, jt) + } + + jf, err := strconv.ParseUint(nums[2], 10, 8) + if err != nil { + t.Fatalf("malformed jf offset %s in instruction %d of %s", nums[2], i+1, allInstructionsExpected) + } + if actual.Jf != uint8(jf) { + t.Errorf("jf mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jf, jf) + } + + k, err := strconv.ParseUint(nums[3], 10, 32) + if err != nil { + t.Fatalf("malformed constant %s in instruction %d of %s", nums[3], i+1, allInstructionsExpected) + } + if actual.K != uint32(k) { + t.Errorf("constant mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.K, k) + } + } +} + +// Check that assembly and disassembly match each other. +// +// Because we offer "fake" jump conditions that don't appear in the +// machine code, disassembly won't be a 1:1 match with the original +// source, although the behavior will be identical. However, +// reassembling the disassembly should produce an identical program. 
+func TestAsmDisasm(t *testing.T) { + prog1, err := Assemble(allInstructions) + if err != nil { + t.Fatalf("assembly of allInstructions program failed: %s", err) + } + t.Logf("Assembled program is %d instructions long", len(prog1)) + + src, allDecoded := Disassemble(prog1) + if !allDecoded { + t.Errorf("Disassemble(Assemble(allInstructions)) produced unrecognized instructions:") + for i, inst := range src { + if r, ok := inst.(RawInstruction); ok { + t.Logf(" insn %d, %#v --> %#v", i+1, allInstructions[i], r) + } + } + } + + prog2, err := Assemble(src) + if err != nil { + t.Fatalf("assembly of Disassemble(Assemble(allInstructions)) failed: %s", err) + } + + if len(prog2) != len(prog1) { + t.Fatalf("disassembly changed program size: %d insns before, %d insns after", len(prog1), len(prog2)) + } + if !reflect.DeepEqual(prog1, prog2) { + t.Errorf("program mutated by disassembly:") + for i := range prog2 { + if !reflect.DeepEqual(prog1[i], prog2[i]) { + t.Logf(" insn %d, s: %#v, p1: %#v, p2: %#v", i+1, allInstructions[i], prog1[i], prog2[i]) + } + } + } +} diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf b/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf new file mode 100644 index 00000000000..f87144064b9 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf @@ -0,0 +1 @@ +50,0 0 0 42,1 0 0 42,96 0 0 3,97 0 0 3,48 0 0 42,40 0 0 42,32 0 0 42,80 0 0 42,72 0 0 42,64 0 0 42,177 0 0 42,128 0 0 0,32 0 0 4294963200,32 0 0 4294963204,32 0 0 4294963256,2 0 0 3,3 0 0 3,4 0 0 42,20 0 0 42,36 0 0 42,52 0 0 42,68 0 0 42,84 0 0 42,100 0 0 42,116 0 0 42,148 0 0 42,164 0 0 42,12 0 0 0,28 0 0 0,44 0 0 0,60 0 0 0,76 0 0 0,92 0 0 0,108 0 0 0,124 0 0 0,156 0 0 0,172 0 0 0,132 0 0 0,5 0 0 10,21 8 9 42,21 0 8 42,53 0 7 42,37 0 6 42,37 4 5 42,53 3 4 42,69 2 3 42,7 0 0 0,135 0 0 0,22 0 0 0,6 0 0 0, diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt b/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt new file mode 100644 index 00000000000..30455015536 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt @@ -0,0 +1,79 @@ +# This filter is compiled to all_instructions.bpf by the `bpf_asm` +# tool, which can be found in the linux kernel source tree under +# tools/net. + +# Load immediate +ld #42 +ldx #42 + +# Load scratch +ld M[3] +ldx M[3] + +# Load absolute +ldb [42] +ldh [42] +ld [42] + +# Load indirect +ldb [x + 42] +ldh [x + 42] +ld [x + 42] + +# Load IPv4 header length +ldx 4*([42]&0xf) + +# Run extension function +ld #len +ld #proto +ld #type +ld #rand + +# Store scratch +st M[3] +stx M[3] + +# A constant +add #42 +sub #42 +mul #42 +div #42 +or #42 +and #42 +lsh #42 +rsh #42 +mod #42 +xor #42 + +# A X +add x +sub x +mul x +div x +or x +and x +lsh x +rsh x +mod x +xor x + +# !A +neg + +# Jumps +ja end +jeq #42,prev,end +jne #42,end +jlt #42,end +jle #42,end +jgt #42,prev,end +jge #42,prev,end +jset #42,prev,end + +# Register transfers +tax +txa + +# Returns +prev: ret a +end: ret #42 diff --git a/vendor/golang.org/x/net/bpf/vm.go b/vendor/golang.org/x/net/bpf/vm.go new file mode 100644 index 00000000000..4c656f1e12a --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm.go @@ -0,0 +1,140 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import ( + "errors" + "fmt" +) + +// A VM is an emulated BPF virtual machine. 
+type VM struct { + filter []Instruction +} + +// NewVM returns a new VM using the input BPF program. +func NewVM(filter []Instruction) (*VM, error) { + if len(filter) == 0 { + return nil, errors.New("one or more Instructions must be specified") + } + + for i, ins := range filter { + check := len(filter) - (i + 1) + switch ins := ins.(type) { + // Check for out-of-bounds jumps in instructions + case Jump: + if check <= int(ins.Skip) { + return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip) + } + case JumpIf: + if check <= int(ins.SkipTrue) { + return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) + } + if check <= int(ins.SkipFalse) { + return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) + } + // Check for division or modulus by zero + case ALUOpConstant: + if ins.Val != 0 { + break + } + + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return nil, errors.New("cannot divide by zero using ALUOpConstant") + } + // Check for unknown extensions + case LoadExtension: + switch ins.Num { + case ExtLen: + default: + return nil, fmt.Errorf("extension %d not implemented", ins.Num) + } + } + } + + // Make sure last instruction is a return instruction + switch filter[len(filter)-1].(type) { + case RetA, RetConstant: + default: + return nil, errors.New("BPF program must end with RetA or RetConstant") + } + + // Though our VM works using disassembled instructions, we + // attempt to assemble the input filter anyway to ensure it is compatible + // with an operating system VM. + _, err := Assemble(filter) + + return &VM{ + filter: filter, + }, err +} + +// Run runs the VM's BPF program against the input bytes. +// Run returns the number of bytes accepted by the BPF program, and any errors +// which occurred while processing the program. 
+func (v *VM) Run(in []byte) (int, error) { + var ( + // Registers of the virtual machine + regA uint32 + regX uint32 + regScratch [16]uint32 + + // OK is true if the program should continue processing the next + // instruction, or false if not, causing the loop to break + ok = true + ) + + // TODO(mdlayher): implement: + // - NegateA: + // - would require a change from uint32 registers to int32 + // registers + + // TODO(mdlayher): add interop tests that check signedness of ALU + // operations against kernel implementation, and make sure Go + // implementation matches behavior + + for i := 0; i < len(v.filter) && ok; i++ { + ins := v.filter[i] + + switch ins := ins.(type) { + case ALUOpConstant: + regA = aluOpConstant(ins, regA) + case ALUOpX: + regA, ok = aluOpX(ins, regA, regX) + case Jump: + i += int(ins.Skip) + case JumpIf: + jump := jumpIf(ins, regA) + i += jump + case LoadAbsolute: + regA, ok = loadAbsolute(ins, in) + case LoadConstant: + regA, regX = loadConstant(ins, regA, regX) + case LoadExtension: + regA = loadExtension(ins, in) + case LoadIndirect: + regA, ok = loadIndirect(ins, in, regX) + case LoadMemShift: + regX, ok = loadMemShift(ins, in) + case LoadScratch: + regA, regX = loadScratch(ins, regScratch, regA, regX) + case RetA: + return int(regA), nil + case RetConstant: + return int(ins.Val), nil + case StoreScratch: + regScratch = storeScratch(ins, regScratch, regA, regX) + case TAX: + regX = regA + case TXA: + regA = regX + default: + return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins) + } + } + + return 0, nil +} diff --git a/vendor/golang.org/x/net/bpf/vm_aluop_test.go b/vendor/golang.org/x/net/bpf/vm_aluop_test.go new file mode 100644 index 00000000000..16678244aa4 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_aluop_test.go @@ -0,0 +1,512 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMALUOpAdd(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAdd, + Val: 3, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 8, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 3, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpSub(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + bpf.ALUOpX{ + Op: bpf.ALUOpSub, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpMul(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMul, + Val: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 6, 2, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpDiv(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpDiv, + Val: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 20, 2, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpDivByZeroALUOpConstant(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.ALUOpConstant{ + Op: bpf.ALUOpDiv, + Val: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot divide by zero using ALUOpConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMALUOpDivByZeroALUOpX(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 0 into X + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + // Load byte 1 into A + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Attempt to perform 1/0 + bpf.ALUOpX{ + Op: bpf.ALUOpDiv, + }, + // Return 4 bytes if program does not terminate + bpf.LoadConstant{ + Val: 12, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + 
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpOr(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpOr, + Val: 0x01, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x00, 0x10, 0x03, 0x04, + 0x05, 0x06, 0x07, 0x08, + 0x09, 0xff, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 9, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpAnd(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAnd, + Val: 0x0019, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xaa, 0x09, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpShiftLeft(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpShiftLeft, + Val: 0x01, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x02, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0xaa, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpShiftRight(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpShiftRight, + Val: 0x01, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x04, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x08, 0xff, 0xff, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpMod(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMod, + Val: 20, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 30, 0, 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpModByZeroALUOpConstant(t *testing.T) 
{ + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpMod, + Val: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot divide by zero using ALUOpConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMALUOpModByZeroALUOpX(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 0 into X + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.TAX{}, + // Load byte 1 into A + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Attempt to perform 1%0 + bpf.ALUOpX{ + Op: bpf.ALUOpMod, + }, + // Return 4 bytes if program does not terminate + bpf.LoadConstant{ + Val: 12, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 3, 4, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpXor(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpXor, + Val: 0x0a, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x01, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x0b, 0x00, 0x00, 0x00, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMALUOpUnknown(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.ALUOpConstant{ + Op: bpf.ALUOpAdd, + Val: 1, + }, + // Verify that an unknown operation is a no-op + bpf.ALUOpConstant{ + Op: 100, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 0x02, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_bpf_test.go b/vendor/golang.org/x/net/bpf/vm_bpf_test.go new file mode 100644 index 00000000000..426362361a5 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_bpf_test.go @@ -0,0 +1,192 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +// A virtualMachine is a BPF virtual machine which can process an +// input packet against a BPF program and render a verdict. +type virtualMachine interface { + Run(in []byte) (int, error) +} + +// canUseOSVM indicates if the OS BPF VM is available on this platform. 
+func canUseOSVM() bool { + // OS BPF VM can only be used on platforms where x/net/ipv4 supports + // attaching a BPF program to a socket. + switch runtime.GOOS { + case "linux": + return true + } + + return false +} + +// All BPF tests against both the Go VM and OS VM are assumed to +// be used with a UDP socket. As a result, the entire contents +// of a UDP datagram is sent through the BPF program, but only +// the body after the UDP header will ever be returned in output. + +// testVM sets up a Go BPF VM, and if available, a native OS BPF VM +// for integration testing. +func testVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func(), error) { + goVM, err := bpf.NewVM(filter) + if err != nil { + // Some tests expect an error, so this error must be returned + // instead of fatally exiting the test + return nil, nil, err + } + + mvm := &multiVirtualMachine{ + goVM: goVM, + + t: t, + } + + // If available, add the OS VM for tests which verify that both the Go + // VM and OS VM have exactly the same output for the same input program + // and packet. + done := func() {} + if canUseOSVM() { + osVM, osVMDone := testOSVM(t, filter) + done = func() { osVMDone() } + mvm.osVM = osVM + } + + return mvm, done, nil +} + +// udpHeaderLen is the length of a UDP header. +const udpHeaderLen = 8 + +// A multiVirtualMachine is a virtualMachine which can call out to both the Go VM +// and the native OS VM, if the OS VM is available. +type multiVirtualMachine struct { + goVM virtualMachine + osVM virtualMachine + + t *testing.T +} + +func (mvm *multiVirtualMachine) Run(in []byte) (int, error) { + if len(in) < udpHeaderLen { + mvm.t.Fatalf("input must be at least length of UDP header (%d), got: %d", + udpHeaderLen, len(in)) + } + + // All tests have a UDP header as part of input, because the OS VM + // packets always will. For the Go VM, this output is trimmed before + // being sent back to tests. + goOut, goErr := mvm.goVM.Run(in) + if goOut >= udpHeaderLen { + goOut -= udpHeaderLen + } + + // If Go output is larger than the size of the packet, packet filtering + // interop tests must trim the output bytes to the length of the packet. + // The BPF VM should not do this on its own, as other uses of it do + // not trim the output byte count. + trim := len(in) - udpHeaderLen + if goOut > trim { + goOut = trim + } + + // When the OS VM is not available, process using the Go VM alone + if mvm.osVM == nil { + return goOut, goErr + } + + // The OS VM will apply its own UDP header, so remove the pseudo header + // that the Go VM needs. + osOut, err := mvm.osVM.Run(in[udpHeaderLen:]) + if err != nil { + mvm.t.Fatalf("error while running OS VM: %v", err) + } + + // Verify both VMs return same number of bytes + var mismatch bool + if goOut != osOut { + mismatch = true + mvm.t.Logf("output byte count does not match:\n- go: %v\n- os: %v", goOut, osOut) + } + + if mismatch { + mvm.t.Fatal("Go BPF and OS BPF packet outputs do not match") + } + + return goOut, goErr +} + +// An osVirtualMachine is a virtualMachine which uses the OS's BPF VM for +// processing BPF programs. +type osVirtualMachine struct { + l net.PacketConn + s net.Conn +} + +// testOSVM creates a virtualMachine which uses the OS's BPF VM by injecting +// packets into a UDP listener with a BPF program attached to it. 
+func testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) { + l, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatalf("failed to open OS VM UDP listener: %v", err) + } + + prog, err := bpf.Assemble(filter) + if err != nil { + t.Fatalf("failed to compile BPF program: %v", err) + } + + p := ipv4.NewPacketConn(l) + if err = p.SetBPF(prog); err != nil { + t.Fatalf("failed to attach BPF program to listener: %v", err) + } + + s, err := net.Dial("udp4", l.LocalAddr().String()) + if err != nil { + t.Fatalf("failed to dial connection to listener: %v", err) + } + + done := func() { + _ = s.Close() + _ = l.Close() + } + + return &osVirtualMachine{ + l: l, + s: s, + }, done +} + +// Run sends the input bytes into the OS's BPF VM and returns its verdict. +func (vm *osVirtualMachine) Run(in []byte) (int, error) { + go func() { + _, _ = vm.s.Write(in) + }() + + vm.l.SetDeadline(time.Now().Add(50 * time.Millisecond)) + + var b [512]byte + n, _, err := vm.l.ReadFrom(b[:]) + if err != nil { + // A timeout indicates that BPF filtered out the packet, and thus, + // no input should be returned. + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + return n, nil + } + + return n, err + } + + return n, nil +} diff --git a/vendor/golang.org/x/net/bpf/vm_extension_test.go b/vendor/golang.org/x/net/bpf/vm_extension_test.go new file mode 100644 index 00000000000..7a48c82f3c1 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_extension_test.go @@ -0,0 +1,49 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMLoadExtensionNotImplemented(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadExtension{ + Num: 100, + }, + bpf.RetA{}, + }) + if errStr(err) != "extension 100 not implemented" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadExtensionExtLen(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadExtension{ + Num: bpf.ExtLen, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go new file mode 100644 index 00000000000..516f9462b9f --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -0,0 +1,174 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf + +import ( + "encoding/binary" + "fmt" +) + +func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 { + return aluOpCommon(ins.Op, regA, ins.Val) +} + +func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) { + // Guard against division or modulus by zero by terminating + // the program, as the OS BPF VM does + if regX == 0 { + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return 0, false + } + } + + return aluOpCommon(ins.Op, regA, regX), true +} + +func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 { + switch op { + case ALUOpAdd: + return regA + value + case ALUOpSub: + return regA - value + case ALUOpMul: + return regA * value + case ALUOpDiv: + // Division by zero not permitted by NewVM and aluOpX checks + return regA / value + case ALUOpOr: + return regA | value + case ALUOpAnd: + return regA & value + case ALUOpShiftLeft: + return regA << value + case ALUOpShiftRight: + return regA >> value + case ALUOpMod: + // Modulus by zero not permitted by NewVM and aluOpX checks + return regA % value + case ALUOpXor: + return regA ^ value + default: + return regA + } +} + +func jumpIf(ins JumpIf, value uint32) int { + var ok bool + inV := uint32(ins.Val) + + switch ins.Cond { + case JumpEqual: + ok = value == inV + case JumpNotEqual: + ok = value != inV + case JumpGreaterThan: + ok = value > inV + case JumpLessThan: + ok = value < inV + case JumpGreaterOrEqual: + ok = value >= inV + case JumpLessOrEqual: + ok = value <= inV + case JumpBitsSet: + ok = (value & inV) != 0 + case JumpBitsNotSet: + ok = (value & inV) == 0 + } + + if ok { + return int(ins.SkipTrue) + } + + return int(ins.SkipFalse) +} + +func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { + offset := int(ins.Off) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = ins.Val + case RegX: + regX = ins.Val + } + + return regA, regX +} + +func loadExtension(ins LoadExtension, in []byte) uint32 { + switch ins.Num { + case ExtLen: + return uint32(len(in)) + default: + panic(fmt.Sprintf("unimplemented extension: %d", ins.Num)) + } +} + +func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { + offset := int(ins.Off) + int(regX) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { + offset := int(ins.Off) + + if !inBounds(len(in), offset, 0) { + return 0, false + } + + // Mask off high 4 bits and multiply low 4 bits by 4 + return uint32(in[offset]&0x0f) * 4, true +} + +func inBounds(inLen int, offset int, size int) bool { + return offset+size <= inLen +} + +func loadCommon(in []byte, offset int, size int) (uint32, bool) { + if !inBounds(len(in), offset, size) { + return 0, false + } + + switch size { + case 1: + return uint32(in[offset]), true + case 2: + return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true + case 4: + return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true + default: + panic(fmt.Sprintf("invalid load size: %d", size)) + } +} + +func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = regScratch[ins.N] + case RegX: + regX = regScratch[ins.N] + } + + return regA, regX +} + +func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 { + switch ins.Src { + case RegA: + regScratch[ins.N] = 
regA + case RegX: + regScratch[ins.N] = regX + } + + return regScratch +} diff --git a/vendor/golang.org/x/net/bpf/vm_jump_test.go b/vendor/golang.org/x/net/bpf/vm_jump_test.go new file mode 100644 index 00000000000..e0a3a988bfe --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_jump_test.go @@ -0,0 +1,380 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMJumpOne(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.Jump{ + Skip: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.Jump{ + Skip: 1, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 1 instructions; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfTrueOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.JumpIf{ + Cond: bpf.JumpEqual, + SkipTrue: 2, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 2 instructions in true case; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfFalseOutOfProgram(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.JumpIf{ + Cond: bpf.JumpEqual, + SkipFalse: 3, + }, + bpf.RetA{}, + }) + if errStr(err) != "cannot jump 3 instructions in false case; jumping past program bounds" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMJumpIfEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 1, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfNotEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.JumpIf{ + Cond: bpf.JumpNotEqual, + Val: 1, + SkipFalse: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfGreaterThan(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 
4, + }, + bpf.JumpIf{ + Cond: bpf.JumpGreaterThan, + Val: 0x00010202, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfLessThan(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpLessThan, + Val: 0xff010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfGreaterOrEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpGreaterOrEqual, + Val: 0x00010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfLessOrEqual(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 4, + }, + bpf.JumpIf{ + Cond: bpf.JumpLessOrEqual, + Val: 0xff010203, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 12, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 4, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfBitsSet(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.JumpIf{ + Cond: bpf.JumpBitsSet, + Val: 0x1122, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0x02, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMJumpIfBitsNotSet(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.JumpIf{ + Cond: bpf.JumpBitsNotSet, + 
Val: 0x1221, + SkipTrue: 1, + }, + bpf.RetConstant{ + Val: 0, + }, + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x01, 0x02, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_load_test.go b/vendor/golang.org/x/net/bpf/vm_load_test.go new file mode 100644 index 00000000000..04578b66b4d --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_load_test.go @@ -0,0 +1,246 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "net" + "testing" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +func TestVMLoadAbsoluteOffsetOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 100, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadAbsoluteBadInstructionSize(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Size: 5, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid load byte length 0" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadConstantOK(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegX, + Val: 9, + }, + bpf.TXA{}, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadIndirectOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadIndirect{ + Off: 100, + Size: 1, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output 
bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadMemShiftOutOfBounds(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadMemShift{ + Off: 100, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +const ( + dhcp4Port = 53 +) + +func TestVMLoadMemShiftLoadIndirectNoResult(t *testing.T) { + vm, in, done := testDHCPv4(t) + defer done() + + // Append mostly empty UDP header with incorrect DHCPv4 port + in = append(in, []byte{ + 0, 0, + 0, dhcp4Port + 1, + 0, 0, + 0, 0, + }...) + + out, err := vm.Run(in) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 0, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMLoadMemShiftLoadIndirectOK(t *testing.T) { + vm, in, done := testDHCPv4(t) + defer done() + + // Append mostly empty UDP header with correct DHCPv4 port + in = append(in, []byte{ + 0, 0, + 0, dhcp4Port, + 0, 0, + 0, 0, + }...) + + out, err := vm.Run(in) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := len(in)-8, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func testDHCPv4(t *testing.T) (virtualMachine, []byte, func()) { + // DHCPv4 test data courtesy of David Anderson: + // https://github.com/google/netboot/blob/master/dhcp4/conn_linux.go#L59-L70 + vm, done, err := testVM(t, []bpf.Instruction{ + // Load IPv4 packet length + bpf.LoadMemShift{Off: 8}, + // Get UDP dport + bpf.LoadIndirect{Off: 8 + 2, Size: 2}, + // Correct dport? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: dhcp4Port, SkipFalse: 1}, + // Accept + bpf.RetConstant{Val: 1500}, + // Ignore + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + + // Minimal requirements to make a valid IPv4 header + h := &ipv4.Header{ + Len: ipv4.HeaderLen, + Src: net.IPv4(192, 168, 1, 1), + Dst: net.IPv4(192, 168, 1, 2), + } + hb, err := h.Marshal() + if err != nil { + t.Fatalf("failed to marshal IPv4 header: %v", err) + } + + hb = append([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + }, hb...) + + return vm, hb, done +} diff --git a/vendor/golang.org/x/net/bpf/vm_ret_test.go b/vendor/golang.org/x/net/bpf/vm_ret_test.go new file mode 100644 index 00000000000..2d86eae3e7d --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_ret_test.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMRetA(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 9, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetALargerThanInput(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadAbsolute{ + Off: 8, + Size: 2, + }, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 255, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetConstant(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.RetConstant{ + Val: 9, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 1, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMRetConstantLargerThanInput(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.RetConstant{ + Val: 16, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_scratch_test.go b/vendor/golang.org/x/net/bpf/vm_scratch_test.go new file mode 100644 index 00000000000..e600e3c282d --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_scratch_test.go @@ -0,0 +1,247 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf_test + +import ( + "testing" + + "golang.org/x/net/bpf" +) + +func TestVMStoreScratchInvalidScratchRegisterTooSmall(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: bpf.RegA, + N: -1, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot -1" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchInvalidScratchRegisterTooLarge(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: bpf.RegA, + N: 16, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot 16" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchUnknownSourceRegister(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.StoreScratch{ + Src: 100, + N: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid source register 100" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchInvalidScratchRegisterTooSmall(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: bpf.RegX, + N: -1, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot -1" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchInvalidScratchRegisterTooLarge(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: bpf.RegX, + N: 16, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid scratch slot 16" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMLoadScratchUnknownDestinationRegister(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadScratch{ + Dst: 100, + N: 0, + }, + bpf.RetA{}, + }) + if errStr(err) != "assembling instruction 1: invalid target register 100" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMStoreScratchLoadScratchOneValue(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 255 + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + // Copy to X and store in scratch[0] + bpf.TAX{}, + bpf.StoreScratch{ + Src: bpf.RegX, + N: 0, + }, + // Load byte 1 + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Overwrite 1 with 255 from scratch[0] + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 0, + }, + // Return 255 + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 255, 1, 2, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 3, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} + +func TestVMStoreScratchLoadScratchMultipleValues(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + // Load byte 10 + bpf.LoadAbsolute{ + Off: 8, + Size: 1, + }, + // Store in scratch[0] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 0, + }, + // Load byte 20 + bpf.LoadAbsolute{ + Off: 9, + Size: 1, + }, + // Store in scratch[1] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 1, + }, + // Load byte 30 + bpf.LoadAbsolute{ + Off: 10, + Size: 1, + }, + // Store in scratch[2] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 2, + }, + // Load byte 1 + bpf.LoadAbsolute{ + Off: 11, + Size: 1, + }, + // Store in scratch[3] + bpf.StoreScratch{ + Src: bpf.RegA, + N: 3, + }, + // Load in byte 10 to X + bpf.LoadScratch{ + Dst: bpf.RegX, + N: 0, + }, + // Copy X -> A + bpf.TXA{}, + // 
Verify value is 10 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 10, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Load in byte 20 to A + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 1, + }, + // Verify value is 20 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 20, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Load in byte 30 to A + bpf.LoadScratch{ + Dst: bpf.RegA, + N: 2, + }, + // Verify value is 30 + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: 30, + SkipTrue: 1, + }, + // Fail test if incorrect + bpf.RetConstant{ + Val: 0, + }, + // Return first two bytes on success + bpf.RetConstant{ + Val: 10, + }, + }) + if err != nil { + t.Fatalf("failed to load BPF program: %v", err) + } + defer done() + + out, err := vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 10, 20, 30, 1, + }) + if err != nil { + t.Fatalf("unexpected error while running program: %v", err) + } + if want, got := 2, out; want != got { + t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + want, got) + } +} diff --git a/vendor/golang.org/x/net/bpf/vm_test.go b/vendor/golang.org/x/net/bpf/vm_test.go new file mode 100644 index 00000000000..6bd4dd5c3a8 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_test.go @@ -0,0 +1,144 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf_test + +import ( + "fmt" + "testing" + + "golang.org/x/net/bpf" +) + +var _ bpf.Instruction = unknown{} + +type unknown struct{} + +func (unknown) Assemble() (bpf.RawInstruction, error) { + return bpf.RawInstruction{}, nil +} + +func TestVMUnknownInstruction(t *testing.T) { + vm, done, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegA, + Val: 100, + }, + // Should terminate the program with an error immediately + unknown{}, + bpf.RetA{}, + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + defer done() + + _, err = vm.Run([]byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, + }) + if errStr(err) != "unknown Instruction at index 1: bpf_test.unknown" { + t.Fatalf("unexpected error while running program: %v", err) + } +} + +func TestVMNoReturnInstruction(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{ + bpf.LoadConstant{ + Dst: bpf.RegA, + Val: 1, + }, + }) + if errStr(err) != "BPF program must end with RetA or RetConstant" { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestVMNoInputInstructions(t *testing.T) { + _, _, err := testVM(t, []bpf.Instruction{}) + if errStr(err) != "one or more Instructions must be specified" { + t.Fatalf("unexpected error: %v", err) + } +} + +// ExampleNewVM demonstrates usage of a VM, using an Ethernet frame +// as input and checking its EtherType to determine if it should be accepted. +func ExampleNewVM() { + // Offset | Length | Comment + // ------------------------- + // 00 | 06 | Ethernet destination MAC address + // 06 | 06 | Ethernet source MAC address + // 12 | 02 | Ethernet EtherType + const ( + etOff = 12 + etLen = 2 + + etARP = 0x0806 + ) + + // Set up a VM to filter traffic based on if its EtherType + // matches the ARP EtherType. 
+ vm, err := bpf.NewVM([]bpf.Instruction{ + // Load EtherType value from Ethernet header + bpf.LoadAbsolute{ + Off: etOff, + Size: etLen, + }, + // If EtherType is equal to the ARP EtherType, jump to allow + // packet to be accepted + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: etARP, + SkipTrue: 1, + }, + // EtherType does not match the ARP EtherType + bpf.RetConstant{ + Val: 0, + }, + // EtherType matches the ARP EtherType, accept up to 1500 + // bytes of packet + bpf.RetConstant{ + Val: 1500, + }, + }) + if err != nil { + panic(fmt.Sprintf("failed to load BPF program: %v", err)) + } + + // Create an Ethernet frame with the ARP EtherType for testing + frame := []byte{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, + 0x08, 0x06, + // Payload omitted for brevity + } + + // Run our VM's BPF program using the Ethernet frame as input + out, err := vm.Run(frame) + if err != nil { + panic(fmt.Sprintf("failed to accept Ethernet frame: %v", err)) + } + + // BPF VM can return a byte count greater than the number of input + // bytes, so trim the output to match the input byte length + if out > len(frame) { + out = len(frame) + } + + fmt.Printf("out: %d bytes", out) + + // Output: + // out: 14 bytes +} + +// errStr returns the string representation of an error, or +// "" if it is nil. +func errStr(err error) string { + if err == nil { + return "" + } + + return err.Error() +} diff --git a/vendor/golang.org/x/net/codereview.cfg b/vendor/golang.org/x/net/codereview.cfg new file mode 100644 index 00000000000..3f8b14b64e8 --- /dev/null +++ b/vendor/golang.org/x/net/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index e7ee376c47d..134654cf7e2 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -36,12 +36,7 @@ // Contexts. package context // import "golang.org/x/net/context" -import ( - "errors" - "fmt" - "sync" - "time" -) +import "time" // A Context carries a deadline, a cancelation signal, and other values across // API boundaries. @@ -66,7 +61,7 @@ type Context interface { // // // Stream generates values with DoSomething and sends them to out // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out <-chan Value) error { + // func Stream(ctx context.Context, out chan<- Value) error { // for { // v, err := DoSomething(ctx) // if err != nil { @@ -138,48 +133,6 @@ type Context interface { Value(key interface{}) interface{} } -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. 
-type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, // initialization, and tests, and as the top-level Context for incoming @@ -189,7 +142,7 @@ func Background() Context { } // TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it's is not yet available (because the +// it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context // parameter). TODO is recognized by static analysis tools that determine // whether Contexts are propagated correctly in a program. @@ -201,247 +154,3 @@ func TODO() Context { // A CancelFunc does not wait for the work to stop. // After the first call, subsequent calls to a CancelFunc do nothing. type CancelFunc func() - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, &c) - return &c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) cancelCtx { - return cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return &c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. 
-func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - cancelCtx - timer *time.Timer // Under cancelCtx.mu. 
- - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go new file mode 100644 index 00000000000..9554dcf7123 --- /dev/null +++ b/vendor/golang.org/x/net/context/context_test.go @@ -0,0 +1,577 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +// otherContext is a Context that's not one of the types defined in context.go. +// This lets us test code paths that differ based on the underlying type of the +// Context. 
+type otherContext struct { + Context +} + +func TestBackground(t *testing.T) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func TestTODO(t *testing.T) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func TestWithCancel(t *testing.T) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func TestParentFinishesChild(t *testing.T) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. + pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + tc := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, tc) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. 
+ check := func(ctx Context, name string) { + select { + case <-ctx.Done(): + default: + t.Errorf("<-%s.Done() blocked, but shouldn't have", name) + } + if e := ctx.Err(); e != Canceled { + t.Errorf("%s.Err() == %v want %v", name, e, Canceled) + } + } + check(parent, "parent") + check(cancelChild, "cancelChild") + check(valueChild, "valueChild") + check(timerChild, "timerChild") + + // WithCancel should return a canceled context on a canceled parent. + precanceledChild := WithValue(parent, "key", "value") + select { + case <-precanceledChild.Done(): + default: + t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") + } + if e := precanceledChild.Err(); e != Canceled { + t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) + } +} + +func TestChildFinishesFirst(t *testing.T) { + cancelable, stop := WithCancel(Background()) + defer stop() + for _, parent := range []Context{Background(), cancelable} { + child, cancel := WithCancel(parent) + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-child.Done(): + t.Errorf("<-child.Done() == %v want nothing (it should block)", x) + default: + } + + cc := child.(*cancelCtx) + pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() + if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { + t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) + } + + if pcok { + pc.mu.Lock() + if len(pc.children) != 1 || !pc.children[cc] { + t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) + } + pc.mu.Unlock() + } + + cancel() + + if pcok { + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) + } + pc.mu.Unlock() + } + + // child should be finished. + select { + case <-child.Done(): + default: + t.Errorf("<-child.Done() blocked, but shouldn't have") + } + if e := child.Err(); e != Canceled { + t.Errorf("child.Err() == %v want %v", e, Canceled) + } + + // parent should not be finished. 
+ select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + default: + } + if e := parent.Err(); e != nil { + t.Errorf("parent.Err() == %v want nil", e) + } + } +} + +func testDeadline(c Context, wait time.Duration, t *testing.T) { + select { + case <-time.After(wait): + t.Fatalf("context should have timed out") + case <-c.Done(): + } + if e := c.Err(); e != DeadlineExceeded { + t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) + } +} + +func TestDeadline(t *testing.T) { + c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 200*time.Millisecond, t) + + c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + o := otherContext{c} + testDeadline(o, 200*time.Millisecond, t) + + c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + o = otherContext{c} + c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond)) + testDeadline(c, 200*time.Millisecond, t) +} + +func TestTimeout(t *testing.T) { + c, _ := WithTimeout(Background(), 100*time.Millisecond) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 200*time.Millisecond, t) + + c, _ = WithTimeout(Background(), 100*time.Millisecond) + o := otherContext{c} + testDeadline(o, 200*time.Millisecond, t) + + c, _ = WithTimeout(Background(), 100*time.Millisecond) + o = otherContext{c} + c, _ = WithTimeout(o, 300*time.Millisecond) + testDeadline(c, 200*time.Millisecond, t) +} + +func TestCanceledTimeout(t *testing.T) { + c, _ := WithTimeout(Background(), 200*time.Millisecond) + o := otherContext{c} + c, cancel := WithTimeout(o, 400*time.Millisecond) + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + select { + case <-c.Done(): + default: + t.Errorf("<-c.Done() blocked, but shouldn't have") + } + if e := c.Err(); e != Canceled { + t.Errorf("c.Err() == %v want %v", e, Canceled) + } +} + +type key1 int +type key2 int + +var k1 = key1(1) +var k2 = key2(1) // same int as k1, different type +var k3 = key2(3) // same type as k2, different int + +func TestValues(t *testing.T) { + check := func(c Context, nm, v1, v2, v3 string) { + if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { + t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) + } + if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { + t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) + } + if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { + t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) + } + } + + c0 := Background() + check(c0, "c0", "", "", "") + + c1 := WithValue(Background(), k1, "c1k1") + check(c1, "c1", "c1k1", "", "") + + if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { + t.Errorf("c.String() = %q want %q", got, want) + } + + c2 := WithValue(c1, k2, "c2k2") + check(c2, "c2", "c1k1", "c2k2", "") + + c3 := WithValue(c2, k3, "c3k3") + check(c3, "c2", "c1k1", "c2k2", "c3k3") + + c4 := WithValue(c3, k1, nil) + check(c4, "c4", "", "c2k2", "c3k3") + + o0 := otherContext{Background()} + check(o0, "o0", "", "", "") + + o1 := otherContext{WithValue(Background(), 
k1, "c1k1")} + check(o1, "o1", "c1k1", "", "") + + o2 := WithValue(o1, k2, "o2k2") + check(o2, "o2", "c1k1", "o2k2", "") + + o3 := otherContext{c4} + check(o3, "o3", "", "c2k2", "c3k3") + + o4 := WithValue(o3, k3, nil) + check(o4, "o4", "", "c2k2", "") +} + +func TestAllocs(t *testing.T) { + bg := Background() + for _, test := range []struct { + desc string + f func() + limit float64 + gccgoLimit float64 + }{ + { + desc: "Background()", + f: func() { Background() }, + limit: 0, + gccgoLimit: 0, + }, + { + desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), + f: func() { + c := WithValue(bg, k1, nil) + c.Value(k1) + }, + limit: 3, + gccgoLimit: 3, + }, + { + desc: "WithTimeout(bg, 15*time.Millisecond)", + f: func() { + c, _ := WithTimeout(bg, 15*time.Millisecond) + <-c.Done() + }, + limit: 8, + gccgoLimit: 16, + }, + { + desc: "WithCancel(bg)", + f: func() { + c, cancel := WithCancel(bg) + cancel() + <-c.Done() + }, + limit: 5, + gccgoLimit: 8, + }, + { + desc: "WithTimeout(bg, 100*time.Millisecond)", + f: func() { + c, cancel := WithTimeout(bg, 100*time.Millisecond) + cancel() + <-c.Done() + }, + limit: 8, + gccgoLimit: 25, + }, + } { + limit := test.limit + if runtime.Compiler == "gccgo" { + // gccgo does not yet do escape analysis. + // TODO(iant): Remove this when gccgo does do escape analysis. + limit = test.gccgoLimit + } + if n := testing.AllocsPerRun(100, test.f); n > limit { + t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) + } + } +} + +func TestSimultaneousCancels(t *testing.T) { + root, cancel := WithCancel(Background()) + m := map[Context]CancelFunc{root: cancel} + q := []Context{root} + // Create a tree of contexts. + for len(q) != 0 && len(m) < 100 { + parent := q[0] + q = q[1:] + for i := 0; i < 4; i++ { + ctx, cancel := WithCancel(parent) + m[ctx] = cancel + q = append(q, ctx) + } + } + // Start all the cancels in a random order. + var wg sync.WaitGroup + wg.Add(len(m)) + for _, cancel := range m { + go func(cancel CancelFunc) { + cancel() + wg.Done() + }(cancel) + } + // Wait on all the contexts in a random order. + for ctx := range m { + select { + case <-ctx.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) + } + } + // Wait for all the cancel functions to return. + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) + } +} + +func TestInterlockedCancels(t *testing.T) { + parent, cancelParent := WithCancel(Background()) + child, cancelChild := WithCancel(parent) + go func() { + parent.Done() + cancelChild() + }() + cancelParent() + select { + case <-child.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) + } +} + +func TestLayersCancel(t *testing.T) { + testLayers(t, time.Now().UnixNano(), false) +} + +func TestLayersTimeout(t *testing.T) { + testLayers(t, time.Now().UnixNano(), true) +} + +func testLayers(t *testing.T, seed int64, testTimeout bool) { + rand.Seed(seed) + errorf := func(format string, a ...interface{}) { + t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
+ } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + 100*time.Millisecond): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} + +func TestCancelRemoves(t *testing.T) { + checkChildren := func(when string, ctx Context, want int) { + if got := len(ctx.(*cancelCtx).children); got != want { + t.Errorf("%s: context has %d children, want %d", when, got, want) + } + } + + ctx, _ := WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel := WithCancel(ctx) + checkChildren("with WithCancel child ", ctx, 1) + cancel() + checkChildren("after cancelling WithCancel child", ctx, 0) + + ctx, _ = WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel = WithTimeout(ctx, 60*time.Minute) + checkChildren("with WithTimeout child ", ctx, 1) + cancel() + checkChildren("after cancelling WithTimeout child", ctx, 0) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 00000000000..606cf1f9726 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// +// If the client is nil, http.DefaultClient is used. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. 
+ if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go new file mode 100644 index 00000000000..9f0f90f1bc9 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go @@ -0,0 +1,28 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,go1.7 + +package ctxhttp + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + "context" +) + +func TestGo17Context(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + })) + ctx := context.Background() + resp, err := Get(ctx, http.DefaultClient, ts.URL) + if resp == nil || err != nil { + t.Fatalf("error received from client: %v %v", err, resp) + } + resp.Body.Close() +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go new file mode 100644 index 00000000000..926870cc23f --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go @@ -0,0 +1,147 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // TODO(djd): Respect any existing value of req.Cancel. 
+	cancel := make(chan struct{})
+	req.Cancel = cancel
+
+	type responseAndError struct {
+		resp *http.Response
+		err  error
+	}
+	result := make(chan responseAndError, 1)
+
+	// Make local copies of test hooks closed over by goroutines below.
+	// Prevents data races in tests.
+	testHookDoReturned := testHookDoReturned
+	testHookDidBodyClose := testHookDidBodyClose
+
+	go func() {
+		resp, err := client.Do(req)
+		testHookDoReturned()
+		result <- responseAndError{resp, err}
+	}()
+
+	var resp *http.Response
+
+	select {
+	case <-ctx.Done():
+		testHookContextDoneBeforeHeaders()
+		close(cancel)
+		// Clean up after the goroutine calling client.Do:
+		go func() {
+			if r := <-result; r.resp != nil {
+				testHookDidBodyClose()
+				r.resp.Body.Close()
+			}
+		}()
+		return nil, ctx.Err()
+	case r := <-result:
+		var err error
+		resp, err = r.resp, r.err
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	c := make(chan struct{})
+	go func() {
+		select {
+		case <-ctx.Done():
+			close(cancel)
+		case <-c:
+			// The response's Body is closed.
+		}
+	}()
+	resp.Body = &notifyingReader{resp.Body, c}
+
+	return resp, nil
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+	req, err := http.NewRequest("POST", url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", bodyType)
+	return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+	io.ReadCloser
+	notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+	n, err := r.ReadCloser.Read(p)
+	if err != nil && r.notify != nil {
+		close(r.notify)
+		r.notify = nil
+	}
+	return n, err
+}
+
+func (r *notifyingReader) Close() error {
+	err := r.ReadCloser.Close()
+	if r.notify != nil {
+		close(r.notify)
+		r.notify = nil
+	}
+	return err
+}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go
new file mode 100644
index 00000000000..9159cf02256
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// +build !plan9,!go1.7 + +package ctxhttp + +import ( + "net" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "golang.org/x/net/context" +) + +// golang.org/issue/14065 +func TestClosesResponseBodyOnCancel(t *testing.T) { + defer func() { testHookContextDoneBeforeHeaders = nop }() + defer func() { testHookDoReturned = nop }() + defer func() { testHookDidBodyClose = nop }() + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + // closed when Do enters select case <-ctx.Done() + enteredDonePath := make(chan struct{}) + + testHookContextDoneBeforeHeaders = func() { + close(enteredDonePath) + } + + testHookDoReturned = func() { + // We now have the result (the Flush'd headers) at least, + // so we can cancel the request. + cancel() + + // But block the client.Do goroutine from sending + // until Do enters into the <-ctx.Done() path, since + // otherwise if both channels are readable, select + // picks a random one. + <-enteredDonePath + } + + sawBodyClose := make(chan struct{}) + testHookDidBodyClose = func() { close(sawBodyClose) } + + tr := &http.Transport{} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", ts.URL, nil) + _, doErr := Do(ctx, c, req) + + select { + case <-sawBodyClose: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for body to close") + } + + if doErr != ctx.Err() { + t.Errorf("Do error = %v; want %v", doErr, ctx.Err()) + } +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go new file mode 100644 index 00000000000..1e4155180c2 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go @@ -0,0 +1,105 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !plan9 + +package ctxhttp + +import ( + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "golang.org/x/net/context" +) + +const ( + requestDuration = 100 * time.Millisecond + requestBody = "ok" +) + +func okHandler(w http.ResponseWriter, r *http.Request) { + time.Sleep(requestDuration) + io.WriteString(w, requestBody) +} + +func TestNoTimeout(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(okHandler)) + defer ts.Close() + + ctx := context.Background() + res, err := Get(ctx, nil, ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(slurp) != requestBody { + t.Errorf("body = %q; want %q", slurp, requestBody) + } +} + +func TestCancelBeforeHeaders(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + blockServer := make(chan struct{}) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cancel() + <-blockServer + io.WriteString(w, requestBody) + })) + defer ts.Close() + defer close(blockServer) + + res, err := Get(ctx, nil, ts.URL) + if err == nil { + res.Body.Close() + t.Fatal("Get returned unexpected nil error") + } + if err != context.Canceled { + t.Errorf("err = %v; want %v", err, context.Canceled) + } +} + +func TestCancelAfterHangingRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + <-w.(http.CloseNotifier).CloseNotify() + })) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + resp, err := Get(ctx, nil, ts.URL) + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + + // Cancel befer reading the body. + // Reading Request.Body should fail, since the request was + // canceled before anything was written. + cancel() + + done := make(chan struct{}) + + go func() { + b, err := ioutil.ReadAll(resp.Body) + if len(b) != 0 || err == nil { + t.Errorf(`Read got (%q, %v); want ("", error)`, b, err) + } + close(done) + }() + + select { + case <-time.After(1 * time.Second): + t.Errorf("Test timed out") + case <-done: + } +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 00000000000..f8cda19adae --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. 
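+//
+// A minimal illustrative pattern (worker stands in for the caller's own
+// function) is:
+//
+//	ctx, cancel := WithCancel(parent)
+//	defer cancel()
+//	go worker(ctx) // worker should return once ctx.Done() is closed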
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 00000000000..5a30acabd03 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. 
The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. 
If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. 
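+//
+// Value lookups walk the context chain: each valueCtx answers only for its
+// own key and otherwise defers to its parent, so a lookup costs one step per
+// WithValue layer between the caller and the root.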
+type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/golang.org/x/net/context/withtimeout_test.go new file mode 100644 index 00000000000..a6754dc3689 --- /dev/null +++ b/vendor/golang.org/x/net/context/withtimeout_test.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + "fmt" + "time" + + "golang.org/x/net/context" +) + +func ExampleWithTimeout() { + // Pass a context with a timeout to tell a blocking function that it + // should abandon its work after the timeout elapses. + ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + select { + case <-time.After(200 * time.Millisecond): + fmt.Println("overslept") + case <-ctx.Done(): + fmt.Println(ctx.Err()) // prints "context deadline exceeded" + } + // Output: + // context deadline exceeded +} diff --git a/vendor/golang.org/x/net/dict/dict.go b/vendor/golang.org/x/net/dict/dict.go new file mode 100644 index 00000000000..58fef89e060 --- /dev/null +++ b/vendor/golang.org/x/net/dict/dict.go @@ -0,0 +1,210 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dict implements the Dictionary Server Protocol +// as defined in RFC 2229. +package dict // import "golang.org/x/net/dict" + +import ( + "net/textproto" + "strconv" + "strings" +) + +// A Client represents a client connection to a dictionary server. +type Client struct { + text *textproto.Conn +} + +// Dial returns a new client connected to a dictionary server at +// addr on the given network. +func Dial(network, addr string) (*Client, error) { + text, err := textproto.Dial(network, addr) + if err != nil { + return nil, err + } + _, _, err = text.ReadCodeLine(220) + if err != nil { + text.Close() + return nil, err + } + return &Client{text: text}, nil +} + +// Close closes the connection to the dictionary server. +func (c *Client) Close() error { + return c.text.Close() +} + +// A Dict represents a dictionary available on the server. +type Dict struct { + Name string // short name of dictionary + Desc string // long description +} + +// Dicts returns a list of the dictionaries available on the server. +func (c *Client) Dicts() ([]Dict, error) { + id, err := c.text.Cmd("SHOW DB") + if err != nil { + return nil, err + } + + c.text.StartResponse(id) + defer c.text.EndResponse(id) + + _, _, err = c.text.ReadCodeLine(110) + if err != nil { + return nil, err + } + lines, err := c.text.ReadDotLines() + if err != nil { + return nil, err + } + _, _, err = c.text.ReadCodeLine(250) + + dicts := make([]Dict, len(lines)) + for i := range dicts { + d := &dicts[i] + a, _ := fields(lines[i]) + if len(a) < 2 { + return nil, textproto.ProtocolError("invalid dictionary: " + lines[i]) + } + d.Name = a[0] + d.Desc = a[1] + } + return dicts, err +} + +// A Defn represents a definition. 
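+//
+// Defn values are produced by Define. A minimal illustrative session
+// (the server address is hypothetical) looks like:
+//
+//	c, err := Dial("tcp", "dict.example.org:2628")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()
+//	defs, err := c.Define("!", "gopher")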
+type Defn struct { + Dict Dict // Dict where definition was found + Word string // Word being defined + Text []byte // Definition text, typically multiple lines +} + +// Define requests the definition of the given word. +// The argument dict names the dictionary to use, +// the Name field of a Dict returned by Dicts. +// +// The special dictionary name "*" means to look in all the +// server's dictionaries. +// The special dictionary name "!" means to look in all the +// server's dictionaries in turn, stopping after finding the word +// in one of them. +func (c *Client) Define(dict, word string) ([]*Defn, error) { + id, err := c.text.Cmd("DEFINE %s %q", dict, word) + if err != nil { + return nil, err + } + + c.text.StartResponse(id) + defer c.text.EndResponse(id) + + _, line, err := c.text.ReadCodeLine(150) + if err != nil { + return nil, err + } + a, _ := fields(line) + if len(a) < 1 { + return nil, textproto.ProtocolError("malformed response: " + line) + } + n, err := strconv.Atoi(a[0]) + if err != nil { + return nil, textproto.ProtocolError("invalid definition count: " + a[0]) + } + def := make([]*Defn, n) + for i := 0; i < n; i++ { + _, line, err = c.text.ReadCodeLine(151) + if err != nil { + return nil, err + } + a, _ := fields(line) + if len(a) < 3 { + // skip it, to keep protocol in sync + i-- + n-- + def = def[0:n] + continue + } + d := &Defn{Word: a[0], Dict: Dict{a[1], a[2]}} + d.Text, err = c.text.ReadDotBytes() + if err != nil { + return nil, err + } + def[i] = d + } + _, _, err = c.text.ReadCodeLine(250) + return def, err +} + +// Fields returns the fields in s. +// Fields are space separated unquoted words +// or quoted with single or double quote. +func fields(s string) ([]string, error) { + var v []string + i := 0 + for { + for i < len(s) && (s[i] == ' ' || s[i] == '\t') { + i++ + } + if i >= len(s) { + break + } + if s[i] == '"' || s[i] == '\'' { + q := s[i] + // quoted string + var j int + for j = i + 1; ; j++ { + if j >= len(s) { + return nil, textproto.ProtocolError("malformed quoted string") + } + if s[j] == '\\' { + j++ + continue + } + if s[j] == q { + j++ + break + } + } + v = append(v, unquote(s[i+1:j-1])) + i = j + } else { + // atom + var j int + for j = i; j < len(s); j++ { + if s[j] == ' ' || s[j] == '\t' || s[j] == '\\' || s[j] == '"' || s[j] == '\'' { + break + } + } + v = append(v, s[i:j]) + i = j + } + if i < len(s) { + c := s[i] + if c != ' ' && c != '\t' { + return nil, textproto.ProtocolError("quotes not on word boundaries") + } + } + } + return v, nil +} + +func unquote(s string) string { + if strings.Index(s, "\\") < 0 { + return s + } + b := []byte(s) + w := 0 + for r := 0; r < len(b); r++ { + c := b[r] + if c == '\\' { + r++ + c = b[r] + } + b[w] = c + w++ + } + return string(b[0:w]) +} diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go new file mode 100644 index 00000000000..cd0a8ac1545 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atom provides integer codes (also known as atoms) for a fixed set of +// frequently occurring HTML strings: tag names and attribute keys such as "p" +// and "id". +// +// Sharing an atom's name between all elements with the same tag can result in +// fewer string allocations when tokenizing and parsing HTML. 
Integer +// comparisons are also generally faster than string comparisons. +// +// The value of an atom's particular code is not guaranteed to stay the same +// between versions of this package. Neither is any ordering guaranteed: +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to +// be dense. The only guarantees are that e.g. looking up "div" will yield +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. +package atom // import "golang.org/x/net/html/atom" + +// Atom is an integer code for a string. The zero value maps to "". +type Atom uint32 + +// String returns the atom's name. +func (a Atom) String() string { + start := uint32(a >> 8) + n := uint32(a & 0xff) + if start+n > uint32(len(atomText)) { + return "" + } + return atomText[start : start+n] +} + +func (a Atom) string() string { + return atomText[a>>8 : a>>8+a&0xff] +} + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s []byte) uint32 { + for i := range s { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +func match(s string, t []byte) bool { + for i, c := range t { + if s[i] != c { + return false + } + } + return true +} + +// Lookup returns the atom whose name is s. It returns zero if there is no +// such atom. The lookup is case sensitive. +func Lookup(s []byte) Atom { + if len(s) == 0 || len(s) > maxAtomLen { + return 0 + } + h := fnv(hash0, s) + if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + return 0 +} + +// String returns a string whose contents are equal to s. In that sense, it is +// equivalent to string(s) but may be more efficient. +func String(s []byte) string { + if a := Lookup(s); a != 0 { + return a.String() + } + return string(s) +} diff --git a/vendor/golang.org/x/net/html/atom/atom_test.go b/vendor/golang.org/x/net/html/atom/atom_test.go new file mode 100644 index 00000000000..6e33704dd5e --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom_test.go @@ -0,0 +1,109 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atom + +import ( + "sort" + "testing" +) + +func TestKnown(t *testing.T) { + for _, s := range testAtomList { + if atom := Lookup([]byte(s)); atom.String() != s { + t.Errorf("Lookup(%q) = %#x (%q)", s, uint32(atom), atom.String()) + } + } +} + +func TestHits(t *testing.T) { + for _, a := range table { + if a == 0 { + continue + } + got := Lookup([]byte(a.String())) + if got != a { + t.Errorf("Lookup(%q) = %#x, want %#x", a.String(), uint32(got), uint32(a)) + } + } +} + +func TestMisses(t *testing.T) { + testCases := []string{ + "", + "\x00", + "\xff", + "A", + "DIV", + "Div", + "dIV", + "aa", + "a\x00", + "ab", + "abb", + "abbr0", + "abbr ", + " abbr", + " a", + "acceptcharset", + "acceptCharset", + "accept_charset", + "h0", + "h1h2", + "h7", + "onClick", + "λ", + // The following string has the same hash (0xa1d7fab7) as "onmouseover". 
+ "\x00\x00\x00\x00\x00\x50\x18\xae\x38\xd0\xb7", + } + for _, tc := range testCases { + got := Lookup([]byte(tc)) + if got != 0 { + t.Errorf("Lookup(%q): got %d, want 0", tc, got) + } + } +} + +func TestForeignObject(t *testing.T) { + const ( + afo = Foreignobject + afO = ForeignObject + sfo = "foreignobject" + sfO = "foreignObject" + ) + if got := Lookup([]byte(sfo)); got != afo { + t.Errorf("Lookup(%q): got %#v, want %#v", sfo, got, afo) + } + if got := Lookup([]byte(sfO)); got != afO { + t.Errorf("Lookup(%q): got %#v, want %#v", sfO, got, afO) + } + if got := afo.String(); got != sfo { + t.Errorf("Atom(%#v).String(): got %q, want %q", afo, got, sfo) + } + if got := afO.String(); got != sfO { + t.Errorf("Atom(%#v).String(): got %q, want %q", afO, got, sfO) + } +} + +func BenchmarkLookup(b *testing.B) { + sortedTable := make([]string, 0, len(table)) + for _, a := range table { + if a != 0 { + sortedTable = append(sortedTable, a.String()) + } + } + sort.Strings(sortedTable) + + x := make([][]byte, 1000) + for i := range x { + x[i] = []byte(sortedTable[i%len(sortedTable)]) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, s := range x { + Lookup(s) + } + } +} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go new file mode 100644 index 00000000000..6bfa8660198 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/gen.go @@ -0,0 +1,648 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This program generates table.go and table_test.go. +// Invoke as +// +// go run gen.go |gofmt >table.go +// go run gen.go -test |gofmt >table_test.go + +import ( + "flag" + "fmt" + "math/rand" + "os" + "sort" + "strings" +) + +// identifier converts s to a Go exported identifier. +// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". +func identifier(s string) string { + b := make([]byte, 0, len(s)) + cap := true + for _, c := range s { + if c == '-' { + cap = true + continue + } + if cap && 'a' <= c && c <= 'z' { + c -= 'a' - 'A' + } + cap = false + b = append(b, byte(c)) + } + return string(b) +} + +var test = flag.Bool("test", false, "generate table_test.go") + +func main() { + flag.Parse() + + var all []string + all = append(all, elements...) + all = append(all, attributes...) + all = append(all, eventHandlers...) + all = append(all, extra...) + sort.Strings(all) + + if *test { + fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n") + fmt.Printf("package atom\n\n") + fmt.Printf("var testAtomList = []string{\n") + for _, s := range all { + fmt.Printf("\t%q,\n", s) + } + fmt.Printf("}\n") + return + } + + // uniq - lists have dups + // compute max len too + maxLen := 0 + w := 0 + for _, s := range all { + if w == 0 || all[w-1] != s { + if maxLen < len(s) { + maxLen = len(s) + } + all[w] = s + w++ + } + } + all = all[:w] + + // Find hash that minimizes table size. + var best *table + for i := 0; i < 1000000; i++ { + if best != nil && 1<<(best.k-1) < len(all) { + break + } + h := rand.Uint32() + for k := uint(0); k <= 16; k++ { + if best != nil && k >= best.k { + break + } + var t table + if t.init(h, k, all) { + best = &t + break + } + } + } + if best == nil { + fmt.Fprintf(os.Stderr, "failed to construct string table\n") + os.Exit(1) + } + + // Lay out strings, using overlaps when possible. + layout := append([]string{}, all...) 
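+
+	// The generator packs every atom name into one shared string (atomText)
+	// and encodes each atom as offset<<8 | length, so the two passes below
+	// shrink the output: first by dropping names that are substrings of
+	// other names, then by overlapping matching suffixes and prefixes.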
+
+	// Remove strings that are substrings of other strings
+	for changed := true; changed; {
+		changed = false
+		for i, s := range layout {
+			if s == "" {
+				continue
+			}
+			for j, t := range layout {
+				if i != j && t != "" && strings.Contains(s, t) {
+					changed = true
+					layout[j] = ""
+				}
+			}
+		}
+	}
+
+	// Join strings where one suffix matches another prefix.
+	for {
+		// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
+		// maximizing overlap length k.
+		besti := -1
+		bestj := -1
+		bestk := 0
+		for i, s := range layout {
+			if s == "" {
+				continue
+			}
+			for j, t := range layout {
+				if i == j {
+					continue
+				}
+				for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
+					if s[len(s)-k:] == t[:k] {
+						besti = i
+						bestj = j
+						bestk = k
+					}
+				}
+			}
+		}
+		if bestk > 0 {
+			layout[besti] += layout[bestj][bestk:]
+			layout[bestj] = ""
+			continue
+		}
+		break
+	}
+
+	text := strings.Join(layout, "")
+
+	atom := map[string]uint32{}
+	for _, s := range all {
+		off := strings.Index(text, s)
+		if off < 0 {
+			panic("lost string " + s)
+		}
+		atom[s] = uint32(off<<8 | len(s))
+	}
+
+	// Generate the Go code.
+	fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n")
+	fmt.Printf("package atom\n\nconst (\n")
+	for _, s := range all {
+		fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s])
+	}
+	fmt.Printf(")\n\n")
+
+	fmt.Printf("const hash0 = %#x\n\n", best.h0)
+	fmt.Printf("const maxAtomLen = %d\n\n", maxLen)
+
+	fmt.Printf("var table = [1<<%d]Atom{\n", best.k)
+	for i, s := range best.tab {
+		if s == "" {
+			continue
+		}
+		fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s)
+	}
+	fmt.Printf("}\n")
+	datasize := (1 << best.k) * 4
+
+	fmt.Printf("const atomText =\n")
+	textsize := len(text)
+	for len(text) > 60 {
+		fmt.Printf("\t%q +\n", text[:60])
+		text = text[60:]
+	}
+	fmt.Printf("\t%q\n\n", text)
+
+	fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
+}
+
+type byLen []string
+
+func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
+func (x byLen) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+func (x byLen) Len() int           { return len(x) }
+
+// fnv computes the FNV hash with an arbitrary starting value h.
+func fnv(h uint32, s string) uint32 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint32(s[i])
+		h *= 16777619
+	}
+	return h
+}
+
+// A table represents an attempt at constructing the lookup table.
+// The lookup table uses cuckoo hashing, meaning that each string
+// can be found in one of two positions.
+type table struct {
+	h0   uint32
+	k    uint
+	mask uint32
+	tab  []string
+}
+
+// hash returns the two hashes for s.
+func (t *table) hash(s string) (h1, h2 uint32) {
+	h := fnv(t.h0, s)
+	h1 = h & t.mask
+	h2 = (h >> 16) & t.mask
+	return
+}
+
+// init initializes the table with the given parameters.
+// h0 is the initial hash value,
+// k is the number of bits of hash value to use, and
+// x is the list of strings to store in the table.
+// init returns false if the table cannot be constructed.
+func (t *table) init(h0 uint32, k uint, x []string) bool {
+	t.h0 = h0
+	t.k = k
+	t.tab = make([]string, 1<<k)
+	t.mask = 1<<k - 1
+	for _, s := range x {
+		if !t.insert(s) {
+			return false
+		}
+	}
+	return true
+}
+
+// insert inserts s in the table.
+func (t *table) insert(s string) bool {
+	h1, h2 := t.hash(s)
+	if t.tab[h1] == "" {
+		t.tab[h1] = s
+		return true
+	}
+	if t.tab[h2] == "" {
+		t.tab[h2] = s
+		return true
+	}
+	if t.push(h1, 0) {
+		t.tab[h1] = s
+		return true
+	}
+	if t.push(h2, 0) {
+		t.tab[h2] = s
+		return true
+	}
+	return false
+}
+
+// push attempts to push aside the entry in slot i.
+func (t *table) push(i uint32, depth int) bool {
+	if depth > len(t.tab) {
+		return false
+	}
+	s := t.tab[i]
+	h1, h2 := t.hash(s)
+	j := h1 + h2 - i
+	if t.tab[j] != "" && !t.push(j, depth+1) {
+		return false
+	}
+	t.tab[j] = s
+	return true
+}
+
+// The lists of element names and attribute keys were taken from
+// https://html.spec.whatwg.org/multipage/indices.html#index
+// as of the "HTML Living Standard - Last Updated 21 February 2015" version.
+ +var elements = []string{ + "a", + "abbr", + "address", + "area", + "article", + "aside", + "audio", + "b", + "base", + "bdi", + "bdo", + "blockquote", + "body", + "br", + "button", + "canvas", + "caption", + "cite", + "code", + "col", + "colgroup", + "command", + "data", + "datalist", + "dd", + "del", + "details", + "dfn", + "dialog", + "div", + "dl", + "dt", + "em", + "embed", + "fieldset", + "figcaption", + "figure", + "footer", + "form", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "header", + "hgroup", + "hr", + "html", + "i", + "iframe", + "img", + "input", + "ins", + "kbd", + "keygen", + "label", + "legend", + "li", + "link", + "map", + "mark", + "menu", + "menuitem", + "meta", + "meter", + "nav", + "noscript", + "object", + "ol", + "optgroup", + "option", + "output", + "p", + "param", + "pre", + "progress", + "q", + "rp", + "rt", + "ruby", + "s", + "samp", + "script", + "section", + "select", + "small", + "source", + "span", + "strong", + "style", + "sub", + "summary", + "sup", + "table", + "tbody", + "td", + "template", + "textarea", + "tfoot", + "th", + "thead", + "time", + "title", + "tr", + "track", + "u", + "ul", + "var", + "video", + "wbr", +} + +// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 + +var attributes = []string{ + "abbr", + "accept", + "accept-charset", + "accesskey", + "action", + "alt", + "async", + "autocomplete", + "autofocus", + "autoplay", + "challenge", + "charset", + "checked", + "cite", + "class", + "cols", + "colspan", + "command", + "content", + "contenteditable", + "contextmenu", + "controls", + "coords", + "crossorigin", + "data", + "datetime", + "default", + "defer", + "dir", + "dirname", + "disabled", + "download", + "draggable", + "dropzone", + "enctype", + "for", + "form", + "formaction", + "formenctype", + "formmethod", + "formnovalidate", + "formtarget", + "headers", + "height", + "hidden", + "high", + "href", + "hreflang", + "http-equiv", + "icon", + "id", + "inputmode", + "ismap", + "itemid", + "itemprop", + "itemref", + "itemscope", + "itemtype", + "keytype", + "kind", + "label", + "lang", + "list", + "loop", + "low", + "manifest", + "max", + "maxlength", + "media", + "mediagroup", + "method", + "min", + "minlength", + "multiple", + "muted", + "name", + "novalidate", + "open", + "optimum", + "pattern", + "ping", + "placeholder", + "poster", + "preload", + "radiogroup", + "readonly", + "rel", + "required", + "reversed", + "rows", + "rowspan", + "sandbox", + "spellcheck", + "scope", + "scoped", + "seamless", + "selected", + "shape", + "size", + "sizes", + "sortable", + "sorted", + "span", + "src", + "srcdoc", + "srclang", + "start", + "step", + "style", + "tabindex", + "target", + "title", + "translate", + "type", + "typemustmatch", + "usemap", + "value", + "width", + "wrap", +} + +var eventHandlers = []string{ + "onabort", + "onautocomplete", + "onautocompleteerror", + "onafterprint", + "onbeforeprint", + "onbeforeunload", + "onblur", + "oncancel", + "oncanplay", + "oncanplaythrough", + "onchange", + "onclick", + "onclose", + "oncontextmenu", + "oncuechange", + "ondblclick", + "ondrag", + "ondragend", + "ondragenter", + "ondragleave", + "ondragover", + "ondragstart", + "ondrop", + "ondurationchange", + "onemptied", + "onended", + "onerror", + "onfocus", + "onhashchange", + "oninput", + "oninvalid", + "onkeydown", + "onkeypress", + "onkeyup", + "onlanguagechange", + "onload", + "onloadeddata", + "onloadedmetadata", + "onloadstart", + "onmessage", + "onmousedown", + "onmousemove", + "onmouseout", + 
"onmouseover", + "onmouseup", + "onmousewheel", + "onoffline", + "ononline", + "onpagehide", + "onpageshow", + "onpause", + "onplay", + "onplaying", + "onpopstate", + "onprogress", + "onratechange", + "onreset", + "onresize", + "onscroll", + "onseeked", + "onseeking", + "onselect", + "onshow", + "onsort", + "onstalled", + "onstorage", + "onsubmit", + "onsuspend", + "ontimeupdate", + "ontoggle", + "onunload", + "onvolumechange", + "onwaiting", +} + +// extra are ad-hoc values not covered by any of the lists above. +var extra = []string{ + "align", + "annotation", + "annotation-xml", + "applet", + "basefont", + "bgsound", + "big", + "blink", + "center", + "color", + "desc", + "face", + "font", + "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. + "foreignobject", + "frame", + "frameset", + "image", + "isindex", + "listing", + "malignmark", + "marquee", + "math", + "mglyph", + "mi", + "mn", + "mo", + "ms", + "mtext", + "nobr", + "noembed", + "noframes", + "plaintext", + "prompt", + "public", + "spacer", + "strike", + "svg", + "system", + "tt", + "xmp", +} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go new file mode 100644 index 00000000000..2605ba3102f --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -0,0 +1,713 @@ +// generated by go run gen.go; DO NOT EDIT + +package atom + +const ( + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x2106 + AcceptCharset Atom = 0x210e + Accesskey Atom = 0x3309 + Action Atom = 0x1f606 + Address Atom = 0x4f307 + Align Atom = 0x1105 + Alt Atom = 0x4503 + Annotation Atom = 0x1670a + AnnotationXml Atom = 0x1670e + Applet Atom = 0x2b306 + Area Atom = 0x2fa04 + Article Atom = 0x38807 + Aside Atom = 0x8305 + Async Atom = 0x7b05 + Audio Atom = 0xa605 + Autocomplete Atom = 0x1fc0c + Autofocus Atom = 0xb309 + Autoplay Atom = 0xce08 + B Atom = 0x101 + Base Atom = 0xd604 + Basefont Atom = 0xd608 + Bdi Atom = 0x1a03 + Bdo Atom = 0xe703 + Bgsound Atom = 0x11807 + Big Atom = 0x12403 + Blink Atom = 0x12705 + Blockquote Atom = 0x12c0a + Body Atom = 0x2f04 + Br Atom = 0x202 + Button Atom = 0x13606 + Canvas Atom = 0x7f06 + Caption Atom = 0x1bb07 + Center Atom = 0x5b506 + Challenge Atom = 0x21f09 + Charset Atom = 0x2807 + Checked Atom = 0x32807 + Cite Atom = 0x3c804 + Class Atom = 0x4de05 + Code Atom = 0x14904 + Col Atom = 0x15003 + Colgroup Atom = 0x15008 + Color Atom = 0x15d05 + Cols Atom = 0x16204 + Colspan Atom = 0x16207 + Command Atom = 0x17507 + Content Atom = 0x42307 + Contenteditable Atom = 0x4230f + Contextmenu Atom = 0x3310b + Controls Atom = 0x18808 + Coords Atom = 0x19406 + Crossorigin Atom = 0x19f0b + Data Atom = 0x44a04 + Datalist Atom = 0x44a08 + Datetime Atom = 0x23c08 + Dd Atom = 0x26702 + Default Atom = 0x8607 + Defer Atom = 0x14b05 + Del Atom = 0x3ef03 + Desc Atom = 0x4db04 + Details Atom = 0x4807 + Dfn Atom = 0x6103 + Dialog Atom = 0x1b06 + Dir Atom = 0x6903 + Dirname Atom = 0x6907 + Disabled Atom = 0x10c08 + Div Atom = 0x11303 + Dl Atom = 0x11e02 + Download Atom = 0x40008 + Draggable Atom = 0x17b09 + Dropzone Atom = 0x39108 + Dt Atom = 0x50902 + Em Atom = 0x6502 + Embed Atom = 0x6505 + Enctype Atom = 0x21107 + Face Atom = 0x5b304 + Fieldset Atom = 0x1b008 + Figcaption Atom = 0x1b80a + Figure Atom = 0x1cc06 + Font Atom = 0xda04 + Footer Atom = 0x8d06 + For Atom = 0x1d803 + ForeignObject Atom = 0x1d80d + Foreignobject Atom = 0x1e50d + Form Atom = 0x1f204 + Formaction Atom = 0x1f20a + Formenctype Atom = 0x20d0b + Formmethod Atom = 0x2280a + 
Formnovalidate Atom = 0x2320e + Formtarget Atom = 0x2470a + Frame Atom = 0x9a05 + Frameset Atom = 0x9a08 + H1 Atom = 0x26e02 + H2 Atom = 0x29402 + H3 Atom = 0x2a702 + H4 Atom = 0x2e902 + H5 Atom = 0x2f302 + H6 Atom = 0x50b02 + Head Atom = 0x2d504 + Header Atom = 0x2d506 + Headers Atom = 0x2d507 + Height Atom = 0x25106 + Hgroup Atom = 0x25906 + Hidden Atom = 0x26506 + High Atom = 0x26b04 + Hr Atom = 0x27002 + Href Atom = 0x27004 + Hreflang Atom = 0x27008 + Html Atom = 0x25504 + HttpEquiv Atom = 0x2780a + I Atom = 0x601 + Icon Atom = 0x42204 + Id Atom = 0x8502 + Iframe Atom = 0x29606 + Image Atom = 0x29c05 + Img Atom = 0x2a103 + Input Atom = 0x3e805 + Inputmode Atom = 0x3e809 + Ins Atom = 0x1a803 + Isindex Atom = 0x2a907 + Ismap Atom = 0x2b005 + Itemid Atom = 0x33c06 + Itemprop Atom = 0x3c908 + Itemref Atom = 0x5ad07 + Itemscope Atom = 0x2b909 + Itemtype Atom = 0x2c308 + Kbd Atom = 0x1903 + Keygen Atom = 0x3906 + Keytype Atom = 0x53707 + Kind Atom = 0x10904 + Label Atom = 0xf005 + Lang Atom = 0x27404 + Legend Atom = 0x18206 + Li Atom = 0x1202 + Link Atom = 0x12804 + List Atom = 0x44e04 + Listing Atom = 0x44e07 + Loop Atom = 0xf404 + Low Atom = 0x11f03 + Malignmark Atom = 0x100a + Manifest Atom = 0x5f108 + Map Atom = 0x2b203 + Mark Atom = 0x1604 + Marquee Atom = 0x2cb07 + Math Atom = 0x2d204 + Max Atom = 0x2e103 + Maxlength Atom = 0x2e109 + Media Atom = 0x6e05 + Mediagroup Atom = 0x6e0a + Menu Atom = 0x33804 + Menuitem Atom = 0x33808 + Meta Atom = 0x45d04 + Meter Atom = 0x24205 + Method Atom = 0x22c06 + Mglyph Atom = 0x2a206 + Mi Atom = 0x2eb02 + Min Atom = 0x2eb03 + Minlength Atom = 0x2eb09 + Mn Atom = 0x23502 + Mo Atom = 0x3ed02 + Ms Atom = 0x2bc02 + Mtext Atom = 0x2f505 + Multiple Atom = 0x30308 + Muted Atom = 0x30b05 + Name Atom = 0x6c04 + Nav Atom = 0x3e03 + Nobr Atom = 0x5704 + Noembed Atom = 0x6307 + Noframes Atom = 0x9808 + Noscript Atom = 0x3d208 + Novalidate Atom = 0x2360a + Object Atom = 0x1ec06 + Ol Atom = 0xc902 + Onabort Atom = 0x13a07 + Onafterprint Atom = 0x1c00c + Onautocomplete Atom = 0x1fa0e + Onautocompleteerror Atom = 0x1fa13 + Onbeforeprint Atom = 0x6040d + Onbeforeunload Atom = 0x4e70e + Onblur Atom = 0xaa06 + Oncancel Atom = 0xe908 + Oncanplay Atom = 0x28509 + Oncanplaythrough Atom = 0x28510 + Onchange Atom = 0x3a708 + Onclick Atom = 0x31007 + Onclose Atom = 0x31707 + Oncontextmenu Atom = 0x32f0d + Oncuechange Atom = 0x3420b + Ondblclick Atom = 0x34d0a + Ondrag Atom = 0x35706 + Ondragend Atom = 0x35709 + Ondragenter Atom = 0x3600b + Ondragleave Atom = 0x36b0b + Ondragover Atom = 0x3760a + Ondragstart Atom = 0x3800b + Ondrop Atom = 0x38f06 + Ondurationchange Atom = 0x39f10 + Onemptied Atom = 0x39609 + Onended Atom = 0x3af07 + Onerror Atom = 0x3b607 + Onfocus Atom = 0x3bd07 + Onhashchange Atom = 0x3da0c + Oninput Atom = 0x3e607 + Oninvalid Atom = 0x3f209 + Onkeydown Atom = 0x3fb09 + Onkeypress Atom = 0x4080a + Onkeyup Atom = 0x41807 + Onlanguagechange Atom = 0x43210 + Onload Atom = 0x44206 + Onloadeddata Atom = 0x4420c + Onloadedmetadata Atom = 0x45510 + Onloadstart Atom = 0x46b0b + Onmessage Atom = 0x47609 + Onmousedown Atom = 0x47f0b + Onmousemove Atom = 0x48a0b + Onmouseout Atom = 0x4950a + Onmouseover Atom = 0x4a20b + Onmouseup Atom = 0x4ad09 + Onmousewheel Atom = 0x4b60c + Onoffline Atom = 0x4c209 + Ononline Atom = 0x4cb08 + Onpagehide Atom = 0x4d30a + Onpageshow Atom = 0x4fe0a + Onpause Atom = 0x50d07 + Onplay Atom = 0x51706 + Onplaying Atom = 0x51709 + Onpopstate Atom = 0x5200a + Onprogress Atom = 0x52a0a + Onratechange Atom = 0x53e0c + Onreset Atom = 0x54a07 + 
Onresize Atom = 0x55108 + Onscroll Atom = 0x55f08 + Onseeked Atom = 0x56708 + Onseeking Atom = 0x56f09 + Onselect Atom = 0x57808 + Onshow Atom = 0x58206 + Onsort Atom = 0x58b06 + Onstalled Atom = 0x59509 + Onstorage Atom = 0x59e09 + Onsubmit Atom = 0x5a708 + Onsuspend Atom = 0x5bb09 + Ontimeupdate Atom = 0xdb0c + Ontoggle Atom = 0x5c408 + Onunload Atom = 0x5cc08 + Onvolumechange Atom = 0x5d40e + Onwaiting Atom = 0x5e209 + Open Atom = 0x3cf04 + Optgroup Atom = 0xf608 + Optimum Atom = 0x5eb07 + Option Atom = 0x60006 + Output Atom = 0x49c06 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x5107 + Ping Atom = 0x7704 + Placeholder Atom = 0xc30b + Plaintext Atom = 0xfd09 + Poster Atom = 0x15706 + Pre Atom = 0x25e03 + Preload Atom = 0x25e07 + Progress Atom = 0x52c08 + Prompt Atom = 0x5fa06 + Public Atom = 0x41e06 + Q Atom = 0x13101 + Radiogroup Atom = 0x30a + Readonly Atom = 0x2fb08 + Rel Atom = 0x25f03 + Required Atom = 0x1d008 + Reversed Atom = 0x5a08 + Rows Atom = 0x9204 + Rowspan Atom = 0x9207 + Rp Atom = 0x1c602 + Rt Atom = 0x13f02 + Ruby Atom = 0xaf04 + S Atom = 0x2c01 + Samp Atom = 0x4e04 + Sandbox Atom = 0xbb07 + Scope Atom = 0x2bd05 + Scoped Atom = 0x2bd06 + Script Atom = 0x3d406 + Seamless Atom = 0x31c08 + Section Atom = 0x4e207 + Select Atom = 0x57a06 + Selected Atom = 0x57a08 + Shape Atom = 0x4f905 + Size Atom = 0x55504 + Sizes Atom = 0x55505 + Small Atom = 0x18f05 + Sortable Atom = 0x58d08 + Sorted Atom = 0x19906 + Source Atom = 0x1aa06 + Spacer Atom = 0x2db06 + Span Atom = 0x9504 + Spellcheck Atom = 0x3230a + Src Atom = 0x3c303 + Srcdoc Atom = 0x3c306 + Srclang Atom = 0x41107 + Start Atom = 0x38605 + Step Atom = 0x5f704 + Strike Atom = 0x53306 + Strong Atom = 0x55906 + Style Atom = 0x61105 + Sub Atom = 0x5a903 + Summary Atom = 0x61607 + Sup Atom = 0x61d03 + Svg Atom = 0x62003 + System Atom = 0x62306 + Tabindex Atom = 0x46308 + Table Atom = 0x42d05 + Target Atom = 0x24b06 + Tbody Atom = 0x2e05 + Td Atom = 0x4702 + Template Atom = 0x62608 + Textarea Atom = 0x2f608 + Tfoot Atom = 0x8c05 + Th Atom = 0x22e02 + Thead Atom = 0x2d405 + Time Atom = 0xdd04 + Title Atom = 0xa105 + Tr Atom = 0x10502 + Track Atom = 0x10505 + Translate Atom = 0x14009 + Tt Atom = 0x5302 + Type Atom = 0x21404 + Typemustmatch Atom = 0x2140d + U Atom = 0xb01 + Ul Atom = 0x8a02 + Usemap Atom = 0x51106 + Value Atom = 0x4005 + Var Atom = 0x11503 + Video Atom = 0x28105 + Wbr Atom = 0x12103 + Width Atom = 0x50705 + Wrap Atom = 0x58704 + Xmp Atom = 0xc103 +) + +const hash0 = 0xc17da63e + +const maxAtomLen = 19 + +var table = [1 << 9]Atom{ + 0x1: 0x48a0b, // onmousemove + 0x2: 0x5e209, // onwaiting + 0x3: 0x1fa13, // onautocompleteerror + 0x4: 0x5fa06, // prompt + 0x7: 0x5eb07, // optimum + 0x8: 0x1604, // mark + 0xa: 0x5ad07, // itemref + 0xb: 0x4fe0a, // onpageshow + 0xc: 0x57a06, // select + 0xd: 0x17b09, // draggable + 0xe: 0x3e03, // nav + 0xf: 0x17507, // command + 0x11: 0xb01, // u + 0x14: 0x2d507, // headers + 0x15: 0x44a08, // datalist + 0x17: 0x4e04, // samp + 0x1a: 0x3fb09, // onkeydown + 0x1b: 0x55f08, // onscroll + 0x1c: 0x15003, // col + 0x20: 0x3c908, // itemprop + 0x21: 0x2780a, // http-equiv + 0x22: 0x61d03, // sup + 0x24: 0x1d008, // required + 0x2b: 0x25e07, // preload + 0x2c: 0x6040d, // onbeforeprint + 0x2d: 0x3600b, // ondragenter + 0x2e: 0x50902, // dt + 0x2f: 0x5a708, // onsubmit + 0x30: 0x27002, // hr + 0x31: 0x32f0d, // oncontextmenu + 0x33: 0x29c05, // image + 0x34: 0x50d07, // onpause + 0x35: 0x25906, // hgroup + 0x36: 0x7704, // ping + 0x37: 0x57808, // onselect + 0x3a: 0x11303, // div + 
0x3b: 0x1fa0e, // onautocomplete + 0x40: 0x2eb02, // mi + 0x41: 0x31c08, // seamless + 0x42: 0x2807, // charset + 0x43: 0x8502, // id + 0x44: 0x5200a, // onpopstate + 0x45: 0x3ef03, // del + 0x46: 0x2cb07, // marquee + 0x47: 0x3309, // accesskey + 0x49: 0x8d06, // footer + 0x4a: 0x44e04, // list + 0x4b: 0x2b005, // ismap + 0x51: 0x33804, // menu + 0x52: 0x2f04, // body + 0x55: 0x9a08, // frameset + 0x56: 0x54a07, // onreset + 0x57: 0x12705, // blink + 0x58: 0xa105, // title + 0x59: 0x38807, // article + 0x5b: 0x22e02, // th + 0x5d: 0x13101, // q + 0x5e: 0x3cf04, // open + 0x5f: 0x2fa04, // area + 0x61: 0x44206, // onload + 0x62: 0xda04, // font + 0x63: 0xd604, // base + 0x64: 0x16207, // colspan + 0x65: 0x53707, // keytype + 0x66: 0x11e02, // dl + 0x68: 0x1b008, // fieldset + 0x6a: 0x2eb03, // min + 0x6b: 0x11503, // var + 0x6f: 0x2d506, // header + 0x70: 0x13f02, // rt + 0x71: 0x15008, // colgroup + 0x72: 0x23502, // mn + 0x74: 0x13a07, // onabort + 0x75: 0x3906, // keygen + 0x76: 0x4c209, // onoffline + 0x77: 0x21f09, // challenge + 0x78: 0x2b203, // map + 0x7a: 0x2e902, // h4 + 0x7b: 0x3b607, // onerror + 0x7c: 0x2e109, // maxlength + 0x7d: 0x2f505, // mtext + 0x7e: 0xbb07, // sandbox + 0x7f: 0x58b06, // onsort + 0x80: 0x100a, // malignmark + 0x81: 0x45d04, // meta + 0x82: 0x7b05, // async + 0x83: 0x2a702, // h3 + 0x84: 0x26702, // dd + 0x85: 0x27004, // href + 0x86: 0x6e0a, // mediagroup + 0x87: 0x19406, // coords + 0x88: 0x41107, // srclang + 0x89: 0x34d0a, // ondblclick + 0x8a: 0x4005, // value + 0x8c: 0xe908, // oncancel + 0x8e: 0x3230a, // spellcheck + 0x8f: 0x9a05, // frame + 0x91: 0x12403, // big + 0x94: 0x1f606, // action + 0x95: 0x6903, // dir + 0x97: 0x2fb08, // readonly + 0x99: 0x42d05, // table + 0x9a: 0x61607, // summary + 0x9b: 0x12103, // wbr + 0x9c: 0x30a, // radiogroup + 0x9d: 0x6c04, // name + 0x9f: 0x62306, // system + 0xa1: 0x15d05, // color + 0xa2: 0x7f06, // canvas + 0xa3: 0x25504, // html + 0xa5: 0x56f09, // onseeking + 0xac: 0x4f905, // shape + 0xad: 0x25f03, // rel + 0xae: 0x28510, // oncanplaythrough + 0xaf: 0x3760a, // ondragover + 0xb0: 0x62608, // template + 0xb1: 0x1d80d, // foreignObject + 0xb3: 0x9204, // rows + 0xb6: 0x44e07, // listing + 0xb7: 0x49c06, // output + 0xb9: 0x3310b, // contextmenu + 0xbb: 0x11f03, // low + 0xbc: 0x1c602, // rp + 0xbd: 0x5bb09, // onsuspend + 0xbe: 0x13606, // button + 0xbf: 0x4db04, // desc + 0xc1: 0x4e207, // section + 0xc2: 0x52a0a, // onprogress + 0xc3: 0x59e09, // onstorage + 0xc4: 0x2d204, // math + 0xc5: 0x4503, // alt + 0xc7: 0x8a02, // ul + 0xc8: 0x5107, // pattern + 0xc9: 0x4b60c, // onmousewheel + 0xca: 0x35709, // ondragend + 0xcb: 0xaf04, // ruby + 0xcc: 0xc01, // p + 0xcd: 0x31707, // onclose + 0xce: 0x24205, // meter + 0xcf: 0x11807, // bgsound + 0xd2: 0x25106, // height + 0xd4: 0x101, // b + 0xd5: 0x2c308, // itemtype + 0xd8: 0x1bb07, // caption + 0xd9: 0x10c08, // disabled + 0xdb: 0x33808, // menuitem + 0xdc: 0x62003, // svg + 0xdd: 0x18f05, // small + 0xde: 0x44a04, // data + 0xe0: 0x4cb08, // ononline + 0xe1: 0x2a206, // mglyph + 0xe3: 0x6505, // embed + 0xe4: 0x10502, // tr + 0xe5: 0x46b0b, // onloadstart + 0xe7: 0x3c306, // srcdoc + 0xeb: 0x5c408, // ontoggle + 0xed: 0xe703, // bdo + 0xee: 0x4702, // td + 0xef: 0x8305, // aside + 0xf0: 0x29402, // h2 + 0xf1: 0x52c08, // progress + 0xf2: 0x12c0a, // blockquote + 0xf4: 0xf005, // label + 0xf5: 0x601, // i + 0xf7: 0x9207, // rowspan + 0xfb: 0x51709, // onplaying + 0xfd: 0x2a103, // img + 0xfe: 0xf608, // optgroup + 0xff: 0x42307, // content + 0x101: 
0x53e0c, // onratechange + 0x103: 0x3da0c, // onhashchange + 0x104: 0x4807, // details + 0x106: 0x40008, // download + 0x109: 0x14009, // translate + 0x10b: 0x4230f, // contenteditable + 0x10d: 0x36b0b, // ondragleave + 0x10e: 0x2106, // accept + 0x10f: 0x57a08, // selected + 0x112: 0x1f20a, // formaction + 0x113: 0x5b506, // center + 0x115: 0x45510, // onloadedmetadata + 0x116: 0x12804, // link + 0x117: 0xdd04, // time + 0x118: 0x19f0b, // crossorigin + 0x119: 0x3bd07, // onfocus + 0x11a: 0x58704, // wrap + 0x11b: 0x42204, // icon + 0x11d: 0x28105, // video + 0x11e: 0x4de05, // class + 0x121: 0x5d40e, // onvolumechange + 0x122: 0xaa06, // onblur + 0x123: 0x2b909, // itemscope + 0x124: 0x61105, // style + 0x127: 0x41e06, // public + 0x129: 0x2320e, // formnovalidate + 0x12a: 0x58206, // onshow + 0x12c: 0x51706, // onplay + 0x12d: 0x3c804, // cite + 0x12e: 0x2bc02, // ms + 0x12f: 0xdb0c, // ontimeupdate + 0x130: 0x10904, // kind + 0x131: 0x2470a, // formtarget + 0x135: 0x3af07, // onended + 0x136: 0x26506, // hidden + 0x137: 0x2c01, // s + 0x139: 0x2280a, // formmethod + 0x13a: 0x3e805, // input + 0x13c: 0x50b02, // h6 + 0x13d: 0xc902, // ol + 0x13e: 0x3420b, // oncuechange + 0x13f: 0x1e50d, // foreignobject + 0x143: 0x4e70e, // onbeforeunload + 0x144: 0x2bd05, // scope + 0x145: 0x39609, // onemptied + 0x146: 0x14b05, // defer + 0x147: 0xc103, // xmp + 0x148: 0x39f10, // ondurationchange + 0x149: 0x1903, // kbd + 0x14c: 0x47609, // onmessage + 0x14d: 0x60006, // option + 0x14e: 0x2eb09, // minlength + 0x14f: 0x32807, // checked + 0x150: 0xce08, // autoplay + 0x152: 0x202, // br + 0x153: 0x2360a, // novalidate + 0x156: 0x6307, // noembed + 0x159: 0x31007, // onclick + 0x15a: 0x47f0b, // onmousedown + 0x15b: 0x3a708, // onchange + 0x15e: 0x3f209, // oninvalid + 0x15f: 0x2bd06, // scoped + 0x160: 0x18808, // controls + 0x161: 0x30b05, // muted + 0x162: 0x58d08, // sortable + 0x163: 0x51106, // usemap + 0x164: 0x1b80a, // figcaption + 0x165: 0x35706, // ondrag + 0x166: 0x26b04, // high + 0x168: 0x3c303, // src + 0x169: 0x15706, // poster + 0x16b: 0x1670e, // annotation-xml + 0x16c: 0x5f704, // step + 0x16d: 0x4, // abbr + 0x16e: 0x1b06, // dialog + 0x170: 0x1202, // li + 0x172: 0x3ed02, // mo + 0x175: 0x1d803, // for + 0x176: 0x1a803, // ins + 0x178: 0x55504, // size + 0x179: 0x43210, // onlanguagechange + 0x17a: 0x8607, // default + 0x17b: 0x1a03, // bdi + 0x17c: 0x4d30a, // onpagehide + 0x17d: 0x6907, // dirname + 0x17e: 0x21404, // type + 0x17f: 0x1f204, // form + 0x181: 0x28509, // oncanplay + 0x182: 0x6103, // dfn + 0x183: 0x46308, // tabindex + 0x186: 0x6502, // em + 0x187: 0x27404, // lang + 0x189: 0x39108, // dropzone + 0x18a: 0x4080a, // onkeypress + 0x18b: 0x23c08, // datetime + 0x18c: 0x16204, // cols + 0x18d: 0x1, // a + 0x18e: 0x4420c, // onloadeddata + 0x190: 0xa605, // audio + 0x192: 0x2e05, // tbody + 0x193: 0x22c06, // method + 0x195: 0xf404, // loop + 0x196: 0x29606, // iframe + 0x198: 0x2d504, // head + 0x19e: 0x5f108, // manifest + 0x19f: 0xb309, // autofocus + 0x1a0: 0x14904, // code + 0x1a1: 0x55906, // strong + 0x1a2: 0x30308, // multiple + 0x1a3: 0xc05, // param + 0x1a6: 0x21107, // enctype + 0x1a7: 0x5b304, // face + 0x1a8: 0xfd09, // plaintext + 0x1a9: 0x26e02, // h1 + 0x1aa: 0x59509, // onstalled + 0x1ad: 0x3d406, // script + 0x1ae: 0x2db06, // spacer + 0x1af: 0x55108, // onresize + 0x1b0: 0x4a20b, // onmouseover + 0x1b1: 0x5cc08, // onunload + 0x1b2: 0x56708, // onseeked + 0x1b4: 0x2140d, // typemustmatch + 0x1b5: 0x1cc06, // figure + 0x1b6: 0x4950a, // onmouseout + 
0x1b7: 0x25e03, // pre + 0x1b8: 0x50705, // width + 0x1b9: 0x19906, // sorted + 0x1bb: 0x5704, // nobr + 0x1be: 0x5302, // tt + 0x1bf: 0x1105, // align + 0x1c0: 0x3e607, // oninput + 0x1c3: 0x41807, // onkeyup + 0x1c6: 0x1c00c, // onafterprint + 0x1c7: 0x210e, // accept-charset + 0x1c8: 0x33c06, // itemid + 0x1c9: 0x3e809, // inputmode + 0x1cb: 0x53306, // strike + 0x1cc: 0x5a903, // sub + 0x1cd: 0x10505, // track + 0x1ce: 0x38605, // start + 0x1d0: 0xd608, // basefont + 0x1d6: 0x1aa06, // source + 0x1d7: 0x18206, // legend + 0x1d8: 0x2d405, // thead + 0x1da: 0x8c05, // tfoot + 0x1dd: 0x1ec06, // object + 0x1de: 0x6e05, // media + 0x1df: 0x1670a, // annotation + 0x1e0: 0x20d0b, // formenctype + 0x1e2: 0x3d208, // noscript + 0x1e4: 0x55505, // sizes + 0x1e5: 0x1fc0c, // autocomplete + 0x1e6: 0x9504, // span + 0x1e7: 0x9808, // noframes + 0x1e8: 0x24b06, // target + 0x1e9: 0x38f06, // ondrop + 0x1ea: 0x2b306, // applet + 0x1ec: 0x5a08, // reversed + 0x1f0: 0x2a907, // isindex + 0x1f3: 0x27008, // hreflang + 0x1f5: 0x2f302, // h5 + 0x1f6: 0x4f307, // address + 0x1fa: 0x2e103, // max + 0x1fb: 0xc30b, // placeholder + 0x1fc: 0x2f608, // textarea + 0x1fe: 0x4ad09, // onmouseup + 0x1ff: 0x3800b, // ondragstart +} + +const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" + + "genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" + + "ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" + + "utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" + + "labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" + + "blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" + + "nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" + + "originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" + + "bjectforeignobjectformactionautocompleteerrorformenctypemust" + + "matchallengeformmethodformnovalidatetimeterformtargetheightm" + + "lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" + + "h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" + + "eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" + + "utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" + + "hangeondblclickondragendondragenterondragleaveondragoverondr" + + "agstarticleondropzonemptiedondurationchangeonendedonerroronf" + + "ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" + + "nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" + + "uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" + + "rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" + + "ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" + + "oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" + + "teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" + + "ollonseekedonseekingonselectedonshowraponsortableonstalledon" + + "storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" + + "changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" + + "mmarysupsvgsystemplate" diff --git a/vendor/golang.org/x/net/html/atom/table_test.go b/vendor/golang.org/x/net/html/atom/table_test.go new file mode 100644 index 00000000000..0f2ecce4fd4 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table_test.go @@ -0,0 +1,351 @@ +// generated by go run gen.go -test; DO NOT EDIT + +package atom + +var testAtomList = []string{ + "a", + "abbr", + "abbr", + "accept", + "accept-charset", + "accesskey", + "action", + "address", + "align", + "alt", + "annotation", + "annotation-xml", + "applet", + "area", + "article", + 
"aside", + "async", + "audio", + "autocomplete", + "autofocus", + "autoplay", + "b", + "base", + "basefont", + "bdi", + "bdo", + "bgsound", + "big", + "blink", + "blockquote", + "body", + "br", + "button", + "canvas", + "caption", + "center", + "challenge", + "charset", + "checked", + "cite", + "cite", + "class", + "code", + "col", + "colgroup", + "color", + "cols", + "colspan", + "command", + "command", + "content", + "contenteditable", + "contextmenu", + "controls", + "coords", + "crossorigin", + "data", + "data", + "datalist", + "datetime", + "dd", + "default", + "defer", + "del", + "desc", + "details", + "dfn", + "dialog", + "dir", + "dirname", + "disabled", + "div", + "dl", + "download", + "draggable", + "dropzone", + "dt", + "em", + "embed", + "enctype", + "face", + "fieldset", + "figcaption", + "figure", + "font", + "footer", + "for", + "foreignObject", + "foreignobject", + "form", + "form", + "formaction", + "formenctype", + "formmethod", + "formnovalidate", + "formtarget", + "frame", + "frameset", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "header", + "headers", + "height", + "hgroup", + "hidden", + "high", + "hr", + "href", + "hreflang", + "html", + "http-equiv", + "i", + "icon", + "id", + "iframe", + "image", + "img", + "input", + "inputmode", + "ins", + "isindex", + "ismap", + "itemid", + "itemprop", + "itemref", + "itemscope", + "itemtype", + "kbd", + "keygen", + "keytype", + "kind", + "label", + "label", + "lang", + "legend", + "li", + "link", + "list", + "listing", + "loop", + "low", + "malignmark", + "manifest", + "map", + "mark", + "marquee", + "math", + "max", + "maxlength", + "media", + "mediagroup", + "menu", + "menuitem", + "meta", + "meter", + "method", + "mglyph", + "mi", + "min", + "minlength", + "mn", + "mo", + "ms", + "mtext", + "multiple", + "muted", + "name", + "nav", + "nobr", + "noembed", + "noframes", + "noscript", + "novalidate", + "object", + "ol", + "onabort", + "onafterprint", + "onautocomplete", + "onautocompleteerror", + "onbeforeprint", + "onbeforeunload", + "onblur", + "oncancel", + "oncanplay", + "oncanplaythrough", + "onchange", + "onclick", + "onclose", + "oncontextmenu", + "oncuechange", + "ondblclick", + "ondrag", + "ondragend", + "ondragenter", + "ondragleave", + "ondragover", + "ondragstart", + "ondrop", + "ondurationchange", + "onemptied", + "onended", + "onerror", + "onfocus", + "onhashchange", + "oninput", + "oninvalid", + "onkeydown", + "onkeypress", + "onkeyup", + "onlanguagechange", + "onload", + "onloadeddata", + "onloadedmetadata", + "onloadstart", + "onmessage", + "onmousedown", + "onmousemove", + "onmouseout", + "onmouseover", + "onmouseup", + "onmousewheel", + "onoffline", + "ononline", + "onpagehide", + "onpageshow", + "onpause", + "onplay", + "onplaying", + "onpopstate", + "onprogress", + "onratechange", + "onreset", + "onresize", + "onscroll", + "onseeked", + "onseeking", + "onselect", + "onshow", + "onsort", + "onstalled", + "onstorage", + "onsubmit", + "onsuspend", + "ontimeupdate", + "ontoggle", + "onunload", + "onvolumechange", + "onwaiting", + "open", + "optgroup", + "optimum", + "option", + "output", + "p", + "param", + "pattern", + "ping", + "placeholder", + "plaintext", + "poster", + "pre", + "preload", + "progress", + "prompt", + "public", + "q", + "radiogroup", + "readonly", + "rel", + "required", + "reversed", + "rows", + "rowspan", + "rp", + "rt", + "ruby", + "s", + "samp", + "sandbox", + "scope", + "scoped", + "script", + "seamless", + "section", + "select", + "selected", + "shape", + "size", + 
"sizes", + "small", + "sortable", + "sorted", + "source", + "spacer", + "span", + "span", + "spellcheck", + "src", + "srcdoc", + "srclang", + "start", + "step", + "strike", + "strong", + "style", + "style", + "sub", + "summary", + "sup", + "svg", + "system", + "tabindex", + "table", + "target", + "tbody", + "td", + "template", + "textarea", + "tfoot", + "th", + "thead", + "time", + "title", + "title", + "tr", + "track", + "translate", + "tt", + "type", + "typemustmatch", + "u", + "ul", + "usemap", + "value", + "var", + "video", + "wbr", + "width", + "wrap", + "xmp", +} diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go new file mode 100644 index 00000000000..13bed1599f7 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/charset.go @@ -0,0 +1,257 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package charset provides common text encodings for HTML documents. +// +// The mapping from encoding labels to encodings is defined at +// https://encoding.spec.whatwg.org/. +package charset // import "golang.org/x/net/html/charset" + +import ( + "bytes" + "fmt" + "io" + "mime" + "strings" + "unicode/utf8" + + "golang.org/x/net/html" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/htmlindex" + "golang.org/x/text/transform" +) + +// Lookup returns the encoding with the specified label, and its canonical +// name. It returns nil and the empty string if label is not one of the +// standard encodings for HTML. Matching is case-insensitive and ignores +// leading and trailing whitespace. Encoders will use HTML escape sequences for +// runes that are not supported by the character set. +func Lookup(label string) (e encoding.Encoding, name string) { + e, err := htmlindex.Get(label) + if err != nil { + return nil, "" + } + name, _ = htmlindex.Name(e) + return &htmlEncoding{e}, name +} + +type htmlEncoding struct{ encoding.Encoding } + +func (h *htmlEncoding) NewEncoder() *encoding.Encoder { + // HTML requires a non-terminating legacy encoder. We use HTML escapes to + // substitute unsupported code points. + return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder()) +} + +// DetermineEncoding determines the encoding of an HTML document by examining +// up to the first 1024 bytes of content and the declared Content-Type. +// +// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding +func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) { + if len(content) > 1024 { + content = content[:1024] + } + + for _, b := range boms { + if bytes.HasPrefix(content, b.bom) { + e, name = Lookup(b.enc) + return e, name, true + } + } + + if _, params, err := mime.ParseMediaType(contentType); err == nil { + if cs, ok := params["charset"]; ok { + if e, name = Lookup(cs); e != nil { + return e, name, true + } + } + } + + if len(content) > 0 { + e, name = prescan(content) + if e != nil { + return e, name, false + } + } + + // Try to detect UTF-8. + // First eliminate any partial rune at the end. 
+ for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- { + b := content[i] + if b < 0x80 { + break + } + if utf8.RuneStart(b) { + content = content[:i] + break + } + } + hasHighBit := false + for _, c := range content { + if c >= 0x80 { + hasHighBit = true + break + } + } + if hasHighBit && utf8.Valid(content) { + return encoding.Nop, "utf-8", false + } + + // TODO: change default depending on user's locale? + return charmap.Windows1252, "windows-1252", false +} + +// NewReader returns an io.Reader that converts the content of r to UTF-8. +// It calls DetermineEncoding to find out what r's encoding is. +func NewReader(r io.Reader, contentType string) (io.Reader, error) { + preview := make([]byte, 1024) + n, err := io.ReadFull(r, preview) + switch { + case err == io.ErrUnexpectedEOF: + preview = preview[:n] + r = bytes.NewReader(preview) + case err != nil: + return nil, err + default: + r = io.MultiReader(bytes.NewReader(preview), r) + } + + if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop { + r = transform.NewReader(r, e.NewDecoder()) + } + return r, nil +} + +// NewReaderLabel returns a reader that converts from the specified charset to +// UTF-8. It uses Lookup to find the encoding that corresponds to label, and +// returns an error if Lookup returns nil. It is suitable for use as +// encoding/xml.Decoder's CharsetReader function. +func NewReaderLabel(label string, input io.Reader) (io.Reader, error) { + e, _ := Lookup(label) + if e == nil { + return nil, fmt.Errorf("unsupported charset: %q", label) + } + return transform.NewReader(input, e.NewDecoder()), nil +} + +func prescan(content []byte) (e encoding.Encoding, name string) { + z := html.NewTokenizer(bytes.NewReader(content)) + for { + switch z.Next() { + case html.ErrorToken: + return nil, "" + + case html.StartTagToken, html.SelfClosingTagToken: + tagName, hasAttr := z.TagName() + if !bytes.Equal(tagName, []byte("meta")) { + continue + } + attrList := make(map[string]bool) + gotPragma := false + + const ( + dontKnow = iota + doNeedPragma + doNotNeedPragma + ) + needPragma := dontKnow + + name = "" + e = nil + for hasAttr { + var key, val []byte + key, val, hasAttr = z.TagAttr() + ks := string(key) + if attrList[ks] { + continue + } + attrList[ks] = true + for i, c := range val { + if 'A' <= c && c <= 'Z' { + val[i] = c + 0x20 + } + } + + switch ks { + case "http-equiv": + if bytes.Equal(val, []byte("content-type")) { + gotPragma = true + } + + case "content": + if e == nil { + name = fromMetaElement(string(val)) + if name != "" { + e, name = Lookup(name) + if e != nil { + needPragma = doNeedPragma + } + } + } + + case "charset": + e, name = Lookup(string(val)) + needPragma = doNotNeedPragma + } + } + + if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma { + continue + } + + if strings.HasPrefix(name, "utf-16") { + name = "utf-8" + e = encoding.Nop + } + + if e != nil { + return e, name + } + } + } +} + +func fromMetaElement(s string) string { + for s != "" { + csLoc := strings.Index(s, "charset") + if csLoc == -1 { + return "" + } + s = s[csLoc+len("charset"):] + s = strings.TrimLeft(s, " \t\n\f\r") + if !strings.HasPrefix(s, "=") { + continue + } + s = s[1:] + s = strings.TrimLeft(s, " \t\n\f\r") + if s == "" { + return "" + } + if q := s[0]; q == '"' || q == '\'' { + s = s[1:] + closeQuote := strings.IndexRune(s, rune(q)) + if closeQuote == -1 { + return "" + } + return s[:closeQuote] + } + + end := strings.IndexAny(s, "; \t\n\f\r") + if end == -1 { + end = len(s) + } + return 
s[:end] + } + return "" +} + +var boms = []struct { + bom []byte + enc string +}{ + {[]byte{0xfe, 0xff}, "utf-16be"}, + {[]byte{0xff, 0xfe}, "utf-16le"}, + {[]byte{0xef, 0xbb, 0xbf}, "utf-8"}, +} diff --git a/vendor/golang.org/x/net/html/charset/charset_test.go b/vendor/golang.org/x/net/html/charset/charset_test.go new file mode 100644 index 00000000000..e4e7d86bf9a --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/charset_test.go @@ -0,0 +1,237 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package charset + +import ( + "bytes" + "encoding/xml" + "io/ioutil" + "runtime" + "strings" + "testing" + + "golang.org/x/text/transform" +) + +func transformString(t transform.Transformer, s string) (string, error) { + r := transform.NewReader(strings.NewReader(s), t) + b, err := ioutil.ReadAll(r) + return string(b), err +} + +type testCase struct { + utf8, other, otherEncoding string +} + +// testCases for encoding and decoding. +var testCases = []testCase{ + {"Résumé", "Résumé", "utf8"}, + {"Résumé", "R\xe9sum\xe9", "latin1"}, + {"これは漢字です。", "S0\x8c0o0\"oW[g0Y0\x020", "UTF-16LE"}, + {"これは漢字です。", "0S0\x8c0oo\"[W0g0Y0\x02", "UTF-16BE"}, + {"Hello, world", "Hello, world", "ASCII"}, + {"Gdańsk", "Gda\xf1sk", "ISO-8859-2"}, + {"Ââ Čč Đđ Ŋŋ Õõ Šš Žž Åå Ää", "\xc2\xe2 \xc8\xe8 \xa9\xb9 \xaf\xbf \xd5\xf5 \xaa\xba \xac\xbc \xc5\xe5 \xc4\xe4", "ISO-8859-10"}, + {"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "ISO-8859-11"}, + {"latviešu", "latvie\xf0u", "ISO-8859-13"}, + {"Seònaid", "Se\xf2naid", "ISO-8859-14"}, + {"€1 is cheap", "\xa41 is cheap", "ISO-8859-15"}, + {"românește", "rom\xe2ne\xbate", "ISO-8859-16"}, + {"nutraĵo", "nutra\xbco", "ISO-8859-3"}, + {"Kalâdlit", "Kal\xe2dlit", "ISO-8859-4"}, + {"русский", "\xe0\xe3\xe1\xe1\xda\xd8\xd9", "ISO-8859-5"}, + {"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "ISO-8859-7"}, + {"Kağan", "Ka\xf0an", "ISO-8859-9"}, + {"Résumé", "R\x8esum\x8e", "macintosh"}, + {"Gdańsk", "Gda\xf1sk", "windows-1250"}, + {"русский", "\xf0\xf3\xf1\xf1\xea\xe8\xe9", "windows-1251"}, + {"Résumé", "R\xe9sum\xe9", "windows-1252"}, + {"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "windows-1253"}, + {"Kağan", "Ka\xf0an", "windows-1254"}, + {"עִבְרִית", "\xf2\xc4\xe1\xc0\xf8\xc4\xe9\xfa", "windows-1255"}, + {"العربية", "\xc7\xe1\xda\xd1\xc8\xed\xc9", "windows-1256"}, + {"latviešu", "latvie\xf0u", "windows-1257"}, + {"Việt", "Vi\xea\xf2t", "windows-1258"}, + {"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "windows-874"}, + {"русский", "\xd2\xd5\xd3\xd3\xcb\xc9\xca", "KOI8-R"}, + {"українська", "\xd5\xcb\xd2\xc1\xa7\xce\xd3\xd8\xcb\xc1", "KOI8-U"}, + {"Hello 常用國字標準字體表", "Hello \xb1`\xa5\u03b0\xea\xa6r\xbc\u0437\u01e6r\xc5\xe9\xaa\xed", "big5"}, + {"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gbk"}, + {"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gb18030"}, + {"עִבְרִית", "\x81\x30\xfb\x30\x81\x30\xf6\x34\x81\x30\xf9\x33\x81\x30\xf6\x30\x81\x30\xfb\x36\x81\x30\xf6\x34\x81\x30\xfa\x31\x81\x30\xfb\x38", "gb18030"}, + {"㧯", "\x82\x31\x89\x38", "gb18030"}, + {"これは漢字です。", "\x82\xb1\x82\xea\x82\xcd\x8a\xbf\x8e\x9a\x82\xc5\x82\xb7\x81B", "SJIS"}, + {"Hello, 世界!", "Hello, \x90\xa2\x8aE!", "SJIS"}, + {"イウエオカ", "\xb2\xb3\xb4\xb5\xb6", "SJIS"}, + {"これは漢字です。", "\xa4\xb3\xa4\xec\xa4\u03f4\xc1\xbb\xfa\xa4\u01e4\xb9\xa1\xa3", "EUC-JP"}, + {"Hello, 世界!", "Hello, \x1b$B@$3&\x1b(B!", 
"ISO-2022-JP"}, + {"다음과 같은 조건을 따라야 합니다: 저작자표시", "\xb4\xd9\xc0\xbd\xb0\xfa \xb0\xb0\xc0\xba \xc1\xb6\xb0\xc7\xc0\xbb \xb5\xfb\xb6\xf3\xbe\xdf \xc7մϴ\xd9: \xc0\xfa\xc0\xdb\xc0\xdaǥ\xbd\xc3", "EUC-KR"}, +} + +func TestDecode(t *testing.T) { + testCases := append(testCases, []testCase{ + // Replace multi-byte maximum subpart of ill-formed subsequence with + // single replacement character (WhatWG requirement). + {"Rés\ufffdumé", "Rés\xe1\x80umé", "utf8"}, + }...) + for _, tc := range testCases { + e, _ := Lookup(tc.otherEncoding) + if e == nil { + t.Errorf("%s: not found", tc.otherEncoding) + continue + } + s, err := transformString(e.NewDecoder(), tc.other) + if err != nil { + t.Errorf("%s: decode %q: %v", tc.otherEncoding, tc.other, err) + continue + } + if s != tc.utf8 { + t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.utf8) + } + } +} + +func TestEncode(t *testing.T) { + testCases := append(testCases, []testCase{ + // Use Go-style replacement. + {"Rés\xe1\x80umé", "Rés\ufffd\ufffdumé", "utf8"}, + // U+0144 LATIN SMALL LETTER N WITH ACUTE not supported by encoding. + {"Gdańsk", "Gdańsk", "ISO-8859-11"}, + {"\ufffd", "�", "ISO-8859-11"}, + {"a\xe1\x80b", "a��b", "ISO-8859-11"}, + }...) + for _, tc := range testCases { + e, _ := Lookup(tc.otherEncoding) + if e == nil { + t.Errorf("%s: not found", tc.otherEncoding) + continue + } + s, err := transformString(e.NewEncoder(), tc.utf8) + if err != nil { + t.Errorf("%s: encode %q: %s", tc.otherEncoding, tc.utf8, err) + continue + } + if s != tc.other { + t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.other) + } + } +} + +var sniffTestCases = []struct { + filename, declared, want string +}{ + {"HTTP-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"}, + {"UTF-16LE-BOM.html", "", "utf-16le"}, + {"UTF-16BE-BOM.html", "", "utf-16be"}, + {"meta-content-attribute.html", "text/html", "iso-8859-15"}, + {"meta-charset-attribute.html", "text/html", "iso-8859-15"}, + {"No-encoding-declaration.html", "text/html", "utf-8"}, + {"HTTP-vs-UTF-8-BOM.html", "text/html; charset=iso-8859-15", "utf-8"}, + {"HTTP-vs-meta-content.html", "text/html; charset=iso-8859-15", "iso-8859-15"}, + {"HTTP-vs-meta-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"}, + {"UTF-8-BOM-vs-meta-content.html", "text/html", "utf-8"}, + {"UTF-8-BOM-vs-meta-charset.html", "text/html", "utf-8"}, +} + +func TestSniff(t *testing.T) { + switch runtime.GOOS { + case "nacl": // platforms that don't permit direct file system access + t.Skipf("not supported on %q", runtime.GOOS) + } + + for _, tc := range sniffTestCases { + content, err := ioutil.ReadFile("testdata/" + tc.filename) + if err != nil { + t.Errorf("%s: error reading file: %v", tc.filename, err) + continue + } + + _, name, _ := DetermineEncoding(content, tc.declared) + if name != tc.want { + t.Errorf("%s: got %q, want %q", tc.filename, name, tc.want) + continue + } + } +} + +func TestReader(t *testing.T) { + switch runtime.GOOS { + case "nacl": // platforms that don't permit direct file system access + t.Skipf("not supported on %q", runtime.GOOS) + } + + for _, tc := range sniffTestCases { + content, err := ioutil.ReadFile("testdata/" + tc.filename) + if err != nil { + t.Errorf("%s: error reading file: %v", tc.filename, err) + continue + } + + r, err := NewReader(bytes.NewReader(content), tc.declared) + if err != nil { + t.Errorf("%s: error creating reader: %v", tc.filename, err) + continue + } + + got, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("%s: error reading from charset.NewReader: 
%v", tc.filename, err) + continue + } + + e, _ := Lookup(tc.want) + want, err := ioutil.ReadAll(transform.NewReader(bytes.NewReader(content), e.NewDecoder())) + if err != nil { + t.Errorf("%s: error decoding with hard-coded charset name: %v", tc.filename, err) + continue + } + + if !bytes.Equal(got, want) { + t.Errorf("%s: got %q, want %q", tc.filename, got, want) + continue + } + } +} + +var metaTestCases = []struct { + meta, want string +}{ + {"", ""}, + {"text/html", ""}, + {"text/html; charset utf-8", ""}, + {"text/html; charset=latin-2", "latin-2"}, + {"text/html; charset; charset = utf-8", "utf-8"}, + {`charset="big5"`, "big5"}, + {"charset='shift_jis'", "shift_jis"}, +} + +func TestFromMeta(t *testing.T) { + for _, tc := range metaTestCases { + got := fromMetaElement(tc.meta) + if got != tc.want { + t.Errorf("%q: got %q, want %q", tc.meta, got, tc.want) + } + } +} + +func TestXML(t *testing.T) { + const s = "r\xe9sum\xe9" + + d := xml.NewDecoder(strings.NewReader(s)) + d.CharsetReader = NewReaderLabel + + var a struct { + Word string + } + err := d.Decode(&a) + if err != nil { + t.Fatalf("Decode: %v", err) + } + + want := "résumé" + if a.Word != want { + t.Errorf("got %q, want %q", a.Word, want) + } +} diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html new file mode 100644 index 00000000000..9915fa0ee4f --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html @@ -0,0 +1,48 @@ + + + + HTTP charset + + + + + + + + + + + +

HTTP charset

The character encoding of a page can be set using the HTTP header charset declaration.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

The only character encoding declaration for this HTML file is in the HTTP header, which sets the encoding to ISO 8859-15.

HTML5 test: the-input-byte-stream-001 (result summary & related tests, detailed results, link to spec)

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
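The page above relies on the HTTP Content-Type header alone to declare its encoding. A minimal sketch of how the charset package added in this change might be used to honour such a declaration when fetching a page; the URL and variable names are illustrative only and not part of the vendored sources:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"

	"golang.org/x/net/html/charset"
)

func main() {
	// Hypothetical URL, used only for illustration.
	resp, err := http.Get("http://example.com/page.html")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// NewReader sniffs up to the first 1024 bytes together with the declared
	// Content-Type and returns a reader that always yields UTF-8.
	r, err := charset.NewReader(resp.Body, resp.Header.Get("Content-Type"))
	if err != nil {
		panic(err)
	}
	utf8Body, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d UTF-8 bytes\n", len(utf8Body))
}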
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html new file mode 100644 index 00000000000..26e5d8b4ebf --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html @@ -0,0 +1,48 @@ + + + + HTTP vs UTF-8 BOM + + + + + + + + + + + +

HTTP vs UTF-8 BOM

A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.

The HTTP header attempts to set the character encoding to ISO 8859-15. The page starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

If the test is unsuccessful, the characters that the UTF-8 signature bytes map to in ISO 8859-15 (ï»¿) should appear at the top of the page.

HTML5 test: the-input-byte-stream-034 (result summary & related tests, detailed results, link to spec)

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
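The precedence this page tests corresponds to the order of checks in DetermineEncoding above: the byte-order-mark scan runs before the Content-Type parameter is consulted. A minimal sketch, assuming the package is imported as golang.org/x/net/html/charset:

package main

import (
	"fmt"

	"golang.org/x/net/html/charset"
)

func main() {
	// Content starts with the UTF-8 signature EF BB BF, but the header claims ISO 8859-15.
	content := []byte("\xef\xbb\xbf<!DOCTYPE html><p>ýäè</p>")
	_, name, certain := charset.DetermineEncoding(content, "text/html; charset=iso-8859-15")
	fmt.Println(name, certain) // utf-8 true: the BOM wins over the HTTP declaration
}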
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html new file mode 100644 index 00000000000..2f07e95158e --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html @@ -0,0 +1,49 @@ + + + + HTTP vs meta charset + + + + + + + + + + + +

HTTP vs meta charset

The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.

The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-1.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

HTML5 test: the-input-byte-stream-018 (result summary & related tests, detailed results, link to spec)

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html new file mode 100644 index 00000000000..6853cddeccc --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html @@ -0,0 +1,49 @@ + + + + HTTP vs meta content + + + + + + + + + + + +

HTTP vs meta content

The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.

The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-1.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

HTML5 test: the-input-byte-stream-016 (result summary & related tests, detailed results, link to spec)

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
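Both "HTTP vs meta" pages exercise the same branch of DetermineEncoding: the charset parameter of the declared Content-Type is checked before the meta prescan runs, so it overrides any in-page declaration. A minimal illustrative sketch:

package main

import (
	"fmt"

	"golang.org/x/net/html/charset"
)

func main() {
	// The page declares ISO 8859-1 in a meta element, but the header says ISO 8859-15.
	content := []byte(`<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">`)
	_, name, certain := charset.DetermineEncoding(content, "text/html; charset=iso-8859-15")
	fmt.Println(name, certain) // iso-8859-15 true: the header beats the meta declaration
}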
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html new file mode 100644 index 00000000000..612e26c6c52 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html @@ -0,0 +1,47 @@ + + + + No encoding declaration + + + + + + + + + + + +

No encoding declaration

A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.

The test on this page contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

HTML5 test: the-input-byte-stream-015 (result summary & related tests, detailed results, link to spec)

Assumptions:
  • The test is read from a server that supports HTTP.
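With no BOM, no usable Content-Type parameter and no meta declaration, DetermineEncoding falls back to content sniffing: content containing non-ASCII bytes that validate as UTF-8 is reported as utf-8, otherwise windows-1252 is assumed, and either way the result is marked uncertain. A small illustrative sketch:

package main

import (
	"fmt"

	"golang.org/x/net/html/charset"
)

func main() {
	// Valid UTF-8 with a non-ASCII byte and no declaration of any kind.
	_, name, certain := charset.DetermineEncoding([]byte("<p>naïve UTF-8 text</p>"), "text/html")
	fmt.Println(name, certain) // utf-8 false

	// Bytes that are not valid UTF-8 fall back to the windows-1252 default.
	_, name, certain = charset.DetermineEncoding([]byte("R\xe9sum\xe9"), "text/html")
	fmt.Println(name, certain) // windows-1252 false
}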
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/README b/vendor/golang.org/x/net/html/charset/testdata/README new file mode 100644 index 00000000000..38ef0f9f121 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/README @@ -0,0 +1,9 @@ +These test cases come from +http://www.w3.org/International/tests/repository/html5/the-input-byte-stream/results-basics + +Distributed under both the W3C Test Suite License +(http://www.w3.org/Consortium/Legal/2008/04-testsuite-license) +and the W3C 3-clause BSD License +(http://www.w3.org/Consortium/Legal/2008/03-bsd-license). +To contribute to a W3C Test Suite, see the policies and contribution +forms (http://www.w3.org/2004/10/27-testcases). diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html new file mode 100644 index 00000000000..3abf7a9343c Binary files /dev/null and b/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html differ diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html new file mode 100644 index 00000000000..76254c980c2 Binary files /dev/null and b/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html differ diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html new file mode 100644 index 00000000000..83de43338ec --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html @@ -0,0 +1,49 @@ + + + + UTF-8 BOM vs meta charset + + + + + + + + + + + +

UTF-8 BOM vs meta charset

A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.

The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

HTML5 test: the-input-byte-stream-038 (result summary & related tests, detailed results, link to spec)

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html new file mode 100644 index 00000000000..501aac2d6a5 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html @@ -0,0 +1,48 @@ + + + + UTF-8 BOM vs meta content + + + + + + + + + + + +

UTF-8 BOM vs meta content

A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.

The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

HTML5 test: the-input-byte-stream-037 (result summary & related tests, detailed results, link to spec)

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html new file mode 100644 index 00000000000..2d7d25aba14 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html @@ -0,0 +1,48 @@ + + + + meta charset attribute + + + + + + + + + + + +

meta charset attribute

The character encoding of the page can be set by a meta element with a charset attribute.

The only character encoding declaration for this HTML file is in the charset attribute of the meta element, which declares the encoding to be ISO 8859-15.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

HTML5 test: the-input-byte-stream-009 (result summary & related tests, detailed results, link to spec)

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html new file mode 100644 index 00000000000..1c3f228e7c9 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html @@ -0,0 +1,48 @@ + + + + meta content attribute + + + + + + + + + + + +

meta content attribute

The character encoding of the page can be set by a meta element with http-equiv and content attributes.

The only character encoding declaration for this HTML file is in the content attribute of the meta element, which declares the encoding to be ISO 8859-15.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

HTML5 test: the-input-byte-stream-007 (result summary & related tests, detailed results, link to spec)

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
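The last two pages cover declarations found only by the meta prescan. In that case DetermineEncoding reports the declared encoding but marks the result as uncertain, since results from prescan never set certain to true. A minimal illustrative sketch:

package main

import (
	"fmt"

	"golang.org/x/net/html/charset"
)

func main() {
	// The only declaration is in the meta element itself.
	content := []byte(`<meta charset="iso-8859-15"><p>test</p>`)
	_, name, certain := charset.DetermineEncoding(content, "text/html")
	fmt.Println(name, certain) // iso-8859-15 false: found by the prescan only
}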
+ + + + + + diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go new file mode 100644 index 00000000000..52f651ff6db --- /dev/null +++ b/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,102 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.3.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "isindex": true, + "li": true, + "link": true, + "listing": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "svg": + return element.Data == "foreignObject" + } + return false +} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 00000000000..94f496874ab --- /dev/null +++ b/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. + + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. 
In EBNF notation, the valid call sequence per token is: + + Next {Raw} [ Token | Text | TagName {TagAttr} ] + +Token returns an independent data structure that completely describes a token. +Entities (such as "<") are unescaped, tag names and attribute keys are +lower-cased, and attributes are collected into a []Attribute. For example: + + for { + if z.Next() == html.ErrorToken { + // Returning io.EOF indicates success. + return z.Err() + } + emitToken(z.Token()) + } + +The low-level API performs fewer allocations and copies, but the contents of +the []byte values returned by Text, TagName and TagAttr may change on the next +call to Next. For example, to extract an HTML page's anchor text: + + depth := 0 + for { + tt := z.Next() + switch tt { + case ErrorToken: + return z.Err() + case TextToken: + if depth > 0 { + // emitBytes should copy the []byte it receives, + // if it doesn't process it immediately. + emitBytes(z.Text()) + } + case StartTagToken, EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + +Parsing is done by calling Parse with an io.Reader, which returns the root of +the parse tree (the document element) as a *Node. It is the caller's +responsibility to ensure that the Reader provides UTF-8 encoded HTML. For +example, to process each anchor node in depth-first order: + + doc, err := html.Parse(r) + if err != nil { + // ... + } + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + // Do something with n... + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + +The relevant specifications include: +https://html.spec.whatwg.org/multipage/syntax.html and +https://html.spec.whatwg.org/multipage/syntax.html#tokenization +*/ +package html // import "golang.org/x/net/html" + +// The tokenization algorithm implemented by this package is not a line-by-line +// transliteration of the relatively verbose state-machine in the WHATWG +// specification. A more direct approach is used instead, where the program +// counter implies the state, such as whether it is tokenizing a tag or a text +// node. Specification compliance is verified by checking expected and actual +// outputs over a test suite rather than aiming for algorithmic fidelity. + +// TODO(nigeltao): Does a DOM API belong in this package or a separate one? +// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go new file mode 100644 index 00000000000..c484e5a94fb --- /dev/null +++ b/vendor/golang.org/x/net/html/doctype.go @@ -0,0 +1,156 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +// parseDoctype parses the data from a DoctypeToken into a name, +// public identifier, and system identifier. It returns a Node whose Type +// is DoctypeNode, whose Data is the name, and which has attributes +// named "system" and "public" for the two identifiers if they were present. +// quirks is whether the document should be parsed in "quirks mode". +func parseDoctype(s string) (n *Node, quirks bool) { + n = &Node{Type: DoctypeNode} + + // Find the name. + space := strings.IndexAny(s, whitespace) + if space == -1 { + space = len(s) + } + n.Data = s[:space] + // The comparison to "html" is case-sensitive. 
+ if n.Data != "html" { + quirks = true + } + n.Data = strings.ToLower(n.Data) + s = strings.TrimLeft(s[space:], whitespace) + + if len(s) < 6 { + // It can't start with "PUBLIC" or "SYSTEM". + // Ignore the rest of the string. + return n, quirks || s != "" + } + + key := strings.ToLower(s[:6]) + s = s[6:] + for key == "public" || key == "system" { + s = strings.TrimLeft(s, whitespace) + if s == "" { + break + } + quote := s[0] + if quote != '"' && quote != '\'' { + break + } + s = s[1:] + q := strings.IndexRune(s, rune(quote)) + var id string + if q == -1 { + id = s + s = "" + } else { + id = s[:q] + s = s[q+1:] + } + n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) + if key == "public" { + key = "system" + } else { + key = "" + } + } + + if key != "" || s != "" { + quirks = true + } else if len(n.Attr) > 0 { + if n.Attr[0].Key == "public" { + public := strings.ToLower(n.Attr[0].Val) + switch public { + case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": + quirks = true + default: + for _, q := range quirkyIDs { + if strings.HasPrefix(public, q) { + quirks = true + break + } + } + } + // The following two public IDs only cause quirks mode if there is no system ID. + if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || + strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { + quirks = true + } + } + if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && + strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + quirks = true + } + } + + return n, quirks +} + +// quirkyIDs is a list of public doctype identifiers that cause a document +// to be interpreted in quirks mode. The identifiers should be in lower case. +var quirkyIDs = []string{ + "+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. 
corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 00000000000..a50c04c60e9 --- /dev/null +++ b/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. +// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": 
'\U00002102', + "Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + "DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + 
"Hcirc;": '\U00000124', + "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', + "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + 
"NegativeMediumSpace;": '\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + "Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', 
+ "RightDownVector;": '\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": '\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": 
'\U00002AEB', + "Vcy;": '\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + "angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + 
"bigstar;": '\U00002605', + "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": '\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": 
'\U00002201', + "compfn;": '\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + "diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + 
"empty;": '\U00002205', + "emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": '\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": 
'\U00002194', + "harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + "isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": 
'\U0000294B', + "ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": '\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": 
'\U00002A2A', + "mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + "nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + 
"oast;": '\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + "piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": 
'\U000027E9', + "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": '\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + 
"sigmav;": '\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": '\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + 
"times;": '\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + "upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', 
+ "xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + "REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": 
'\U000000FB', + "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. + // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', '\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + 
"vnsub;": {'\u2282', '\u20D2'}, + "vnsup;": {'\u2283', '\u20D2'}, + "vsubnE;": {'\u2ACB', '\uFE00'}, + "vsubne;": {'\u228A', '\uFE00'}, + "vsupnE;": {'\u2ACC', '\uFE00'}, + "vsupne;": {'\u228B', '\uFE00'}, +} diff --git a/vendor/golang.org/x/net/html/entity_test.go b/vendor/golang.org/x/net/html/entity_test.go new file mode 100644 index 00000000000..b53f866fa2d --- /dev/null +++ b/vendor/golang.org/x/net/html/entity_test.go @@ -0,0 +1,29 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "testing" + "unicode/utf8" +) + +func TestEntityLength(t *testing.T) { + // We verify that the length of UTF-8 encoding of each value is <= 1 + len(key). + // The +1 comes from the leading "&". This property implies that the length of + // unescaped text is <= the length of escaped text. + for k, v := range entity { + if 1+len(k) < utf8.RuneLen(v) { + t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v)) + } + if len(k) > longestEntityWithoutSemicolon && k[len(k)-1] != ';' { + t.Errorf("entity name %s is %d characters, but longestEntityWithoutSemicolon=%d", k, len(k), longestEntityWithoutSemicolon) + } + } + for k, v := range entity2 { + if 1+len(k) < utf8.RuneLen(v[0])+utf8.RuneLen(v[1]) { + t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v[0]) + string(v[1])) + } + } +} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go new file mode 100644 index 00000000000..d8561396200 --- /dev/null +++ b/vendor/golang.org/x/net/html/escape.go @@ -0,0 +1,258 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// These replacements permit compatibility with old numeric entities that +// assumed Windows-1252 encoding. +// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference +var replacementTable = [...]rune{ + '\u20AC', // First entry is what 0x80 should be replaced with. + '\u0081', + '\u201A', + '\u0192', + '\u201E', + '\u2026', + '\u2020', + '\u2021', + '\u02C6', + '\u2030', + '\u0160', + '\u2039', + '\u0152', + '\u008D', + '\u017D', + '\u008F', + '\u0090', + '\u2018', + '\u2019', + '\u201C', + '\u201D', + '\u2022', + '\u2013', + '\u2014', + '\u02DC', + '\u2122', + '\u0161', + '\u203A', + '\u0153', + '\u009D', + '\u017E', + '\u0178', // Last entry is 0x9F. + // 0x00->'\uFFFD' is handled programmatically. + // 0x0D->'\u000D' is a no-op. +} + +// unescapeEntity reads an entity like "<" from b[src:] and writes the +// corresponding "<" to b[dst:], returning the incremented dst and src cursors. +// Precondition: b[src] == '&' && dst <= src. +// attribute should be true if parsing an attribute value. +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { + // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference + + // i starts at 1 because we already know that s[0] == '&'. + i, s := 1, b[src:] + + if len(s) <= 1 { + b[dst] = b[src] + return dst + 1, src + 1 + } + + if s[i] == '#' { + if len(s) <= 3 { // We need to have at least "&#.". 
+			b[dst] = b[src]
+			return dst + 1, src + 1
+		}
+		i++
+		c := s[i]
+		hex := false
+		if c == 'x' || c == 'X' {
+			hex = true
+			i++
+		}
+
+		x := '\x00'
+		for i < len(s) {
+			c = s[i]
+			i++
+			if hex {
+				if '0' <= c && c <= '9' {
+					x = 16*x + rune(c) - '0'
+					continue
+				} else if 'a' <= c && c <= 'f' {
+					x = 16*x + rune(c) - 'a' + 10
+					continue
+				} else if 'A' <= c && c <= 'F' {
+					x = 16*x + rune(c) - 'A' + 10
+					continue
+				}
+			} else if '0' <= c && c <= '9' {
+				x = 10*x + rune(c) - '0'
+				continue
+			}
+			if c != ';' {
+				i--
+			}
+			break
+		}
+
+		if i <= 3 { // No characters matched.
+			b[dst] = b[src]
+			return dst + 1, src + 1
+		}
+
+		if 0x80 <= x && x <= 0x9F {
+			// Replace characters from Windows-1252 with UTF-8 equivalents.
+			x = replacementTable[x-0x80]
+		} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
+			// Replace invalid characters with the replacement character.
+			x = '\uFFFD'
+		}
+
+		return dst + utf8.EncodeRune(b[dst:], x), src + i
+	}
+
+	// Consume the maximum number of characters possible, with the
+	// consumed characters matching one of the named references.
+
+	for i < len(s) {
+		c := s[i]
+		i++
+		// Lower-cased characters are more common in entities, so we check for them first.
+		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+			continue
+		}
+		if c != ';' {
+			i--
+		}
+		break
+	}
+
+	entityName := string(s[1:i])
+	if entityName == "" {
+		// No-op.
+	} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
+		// No-op.
+	} else if x := entity[entityName]; x != 0 {
+		return dst + utf8.EncodeRune(b[dst:], x), src + i
+	} else if x := entity2[entityName]; x[0] != 0 {
+		dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
+		return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
+	} else if !attribute {
+		maxLen := len(entityName) - 1
+		if maxLen > longestEntityWithoutSemicolon {
+			maxLen = longestEntityWithoutSemicolon
+		}
+		for j := maxLen; j > 1; j-- {
+			if x := entity[entityName[:j]]; x != 0 {
+				return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
+			}
+		}
+	}
+
+	dst1, src1 = dst+i, src+i
+	copy(b[dst:dst1], b[src:src1])
+	return dst1, src1
+}
+
+// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
+// attribute should be true if parsing an attribute value.
+func unescape(b []byte, attribute bool) []byte {
+	for i, c := range b {
+		if c == '&' {
+			dst, src := unescapeEntity(b, i, i, attribute)
+			for src < len(b) {
+				c := b[src]
+				if c == '&' {
+					dst, src = unescapeEntity(b, dst, src, attribute)
+				} else {
+					b[dst] = c
+					dst, src = dst+1, src+1
+				}
+			}
+			return b[0:dst]
+		}
+	}
+	return b
+}
+
+// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
+func lower(b []byte) []byte {
+	for i, c := range b {
+		if 'A' <= c && c <= 'Z' {
+			b[i] = c + 'a' - 'A'
+		}
+	}
+	return b
+}
+
+const escapedChars = "&'<>\"\r"
+
+func escape(w writer, s string) error {
+	i := strings.IndexAny(s, escapedChars)
+	for i != -1 {
+		if _, err := w.WriteString(s[:i]); err != nil {
+			return err
+		}
+		var esc string
+		switch s[i] {
+		case '&':
+			esc = "&amp;"
+		case '\'':
+			// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
+			esc = "&#39;"
+		case '<':
+			esc = "&lt;"
+		case '>':
+			esc = "&gt;"
+		case '"':
+			// "&#34;" is shorter than "&quot;".
+			esc = "&#34;"
+		case '\r':
+			esc = "&#13;"
+		default:
+			panic("unrecognized escape character")
+		}
+		s = s[i+1:]
+		if _, err := w.WriteString(esc); err != nil {
+			return err
+		}
+		i = strings.IndexAny(s, escapedChars)
+	}
+	_, err := w.WriteString(s)
+	return err
+}
+
+// EscapeString escapes special characters like "<" to become "&lt;". It
+// escapes only five such characters: <, >, &, ' and ".
+// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
+// always true.
+func EscapeString(s string) string {
+	if strings.IndexAny(s, escapedChars) == -1 {
+		return s
+	}
+	var buf bytes.Buffer
+	escape(&buf, s)
+	return buf.String()
+}
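The one-way round-trip property documented on EscapeString and UnescapeString can be exercised through the package's exported API. The snippet below is an illustrative sketch only (it is not part of the vendored file or of this change), assuming the standard golang.org/x/net/html import path:

package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	s := `3&5==1 && 0<1`
	// Escaping and then unescaping always restores the original text.
	fmt.Println(html.UnescapeString(html.EscapeString(s)) == s) // true

	// The converse can fail: EscapeString only emits the five basic
	// escapes, so a named entity such as &aacute; in the input is not
	// regenerated after a round trip through UnescapeString.
	t := `a+acute=&aacute;`
	fmt.Println(html.EscapeString(html.UnescapeString(t)) == t) // false
}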
+func UnescapeString(s string) string { + for _, c := range s { + if c == '&' { + return string(unescape([]byte(s), false)) + } + } + return s +} diff --git a/vendor/golang.org/x/net/html/escape_test.go b/vendor/golang.org/x/net/html/escape_test.go new file mode 100644 index 00000000000..b405d4b4a77 --- /dev/null +++ b/vendor/golang.org/x/net/html/escape_test.go @@ -0,0 +1,97 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import "testing" + +type unescapeTest struct { + // A short description of the test case. + desc string + // The HTML text. + html string + // The unescaped text. + unescaped string +} + +var unescapeTests = []unescapeTest{ + // Handle no entities. + { + "copy", + "A\ttext\nstring", + "A\ttext\nstring", + }, + // Handle simple named entities. + { + "simple", + "& > <", + "& > <", + }, + // Handle hitting the end of the string. + { + "stringEnd", + "& &", + "& &", + }, + // Handle entities with two codepoints. + { + "multiCodepoint", + "text ⋛︀ blah", + "text \u22db\ufe00 blah", + }, + // Handle decimal numeric entities. + { + "decimalEntity", + "Delta = Δ ", + "Delta = Δ ", + }, + // Handle hexadecimal numeric entities. + { + "hexadecimalEntity", + "Lambda = λ = λ ", + "Lambda = λ = λ ", + }, + // Handle numeric early termination. + { + "numericEnds", + "&# &#x €43 © = ©f = ©", + "&# &#x €43 © = ©f = ©", + }, + // Handle numeric ISO-8859-1 entity replacements. + { + "numericReplacements", + "Footnote‡", + "Footnote‡", + }, +} + +func TestUnescape(t *testing.T) { + for _, tt := range unescapeTests { + unescaped := UnescapeString(tt.html) + if unescaped != tt.unescaped { + t.Errorf("TestUnescape %s: want %q, got %q", tt.desc, tt.unescaped, unescaped) + } + } +} + +func TestUnescapeEscape(t *testing.T) { + ss := []string{ + ``, + `abc def`, + `a & b`, + `a&b`, + `a & b`, + `"`, + `"`, + `"<&>"`, + `"<&>"`, + `3&5==1 && 0<1, "0<1", a+acute=á`, + `The special characters are: <, >, &, ' and "`, + } + for _, s := range ss { + if got := UnescapeString(EscapeString(s)); got != s { + t.Errorf("got %q want %q", got, s) + } + } +} diff --git a/vendor/golang.org/x/net/html/example_test.go b/vendor/golang.org/x/net/html/example_test.go new file mode 100644 index 00000000000..0b06ed7730f --- /dev/null +++ b/vendor/golang.org/x/net/html/example_test.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This example demonstrates parsing HTML data and walking the resulting tree. +package html_test + +import ( + "fmt" + "log" + "strings" + + "golang.org/x/net/html" +) + +func ExampleParse() { + s := `

Links:

` + doc, err := html.Parse(strings.NewReader(s)) + if err != nil { + log.Fatal(err) + } + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + for _, a := range n.Attr { + if a.Key == "href" { + fmt.Println(a.Val) + break + } + } + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + // Output: + // foo + // /bar/baz +} diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go new file mode 100644 index 00000000000..d3b3844099b --- /dev/null +++ b/vendor/golang.org/x/net/html/foreign.go @@ -0,0 +1,226 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { + for i := range aa { + if newName, ok := nameMap[aa[i].Key]; ok { + aa[i].Key = newName + } + } +} + +func adjustForeignAttributes(aa []Attribute) { + for i, a := range aa { + if a.Key == "" || a.Key[0] != 'x' { + continue + } + switch a.Key { + case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", + "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": + j := strings.Index(a.Key, ":") + aa[i].Namespace = a.Key[:j] + aa[i].Key = a.Key[j+1:] + } + } +} + +func htmlIntegrationPoint(n *Node) bool { + if n.Type != ElementNode { + return false + } + switch n.Namespace { + case "math": + if n.Data == "annotation-xml" { + for _, a := range n.Attr { + if a.Key == "encoding" { + val := strings.ToLower(a.Val) + if val == "text/html" || val == "application/xhtml+xml" { + return true + } + } + } + } + case "svg": + switch n.Data { + case "desc", "foreignObject", "title": + return true + } + } + return false +} + +func mathMLTextIntegrationPoint(n *Node) bool { + if n.Namespace != "math" { + return false + } + switch n.Data { + case "mi", "mo", "mn", "ms", "mtext": + return true + } + return false +} + +// Section 12.2.5.5. +var breakout = map[string]bool{ + "b": true, + "big": true, + "blockquote": true, + "body": true, + "br": true, + "center": true, + "code": true, + "dd": true, + "div": true, + "dl": true, + "dt": true, + "em": true, + "embed": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "hr": true, + "i": true, + "img": true, + "li": true, + "listing": true, + "menu": true, + "meta": true, + "nobr": true, + "ol": true, + "p": true, + "pre": true, + "ruby": true, + "s": true, + "small": true, + "span": true, + "strong": true, + "strike": true, + "sub": true, + "sup": true, + "table": true, + "tt": true, + "u": true, + "ul": true, + "var": true, +} + +// Section 12.2.5.5. 
+var svgTagNameAdjustments = map[string]string{ + "altglyph": "altGlyph", + "altglyphdef": "altGlyphDef", + "altglyphitem": "altGlyphItem", + "animatecolor": "animateColor", + "animatemotion": "animateMotion", + "animatetransform": "animateTransform", + "clippath": "clipPath", + "feblend": "feBlend", + "fecolormatrix": "feColorMatrix", + "fecomponenttransfer": "feComponentTransfer", + "fecomposite": "feComposite", + "feconvolvematrix": "feConvolveMatrix", + "fediffuselighting": "feDiffuseLighting", + "fedisplacementmap": "feDisplacementMap", + "fedistantlight": "feDistantLight", + "feflood": "feFlood", + "fefunca": "feFuncA", + "fefuncb": "feFuncB", + "fefuncg": "feFuncG", + "fefuncr": "feFuncR", + "fegaussianblur": "feGaussianBlur", + "feimage": "feImage", + "femerge": "feMerge", + "femergenode": "feMergeNode", + "femorphology": "feMorphology", + "feoffset": "feOffset", + "fepointlight": "fePointLight", + "fespecularlighting": "feSpecularLighting", + "fespotlight": "feSpotLight", + "fetile": "feTile", + "feturbulence": "feTurbulence", + "foreignobject": "foreignObject", + "glyphref": "glyphRef", + "lineargradient": "linearGradient", + "radialgradient": "radialGradient", + "textpath": "textPath", +} + +// Section 12.2.5.1 +var mathMLAttributeAdjustments = map[string]string{ + "definitionurl": "definitionURL", +} + +var svgAttributeAdjustments = map[string]string{ + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", +} diff --git 
a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go new file mode 100644 index 00000000000..26b657aec83 --- /dev/null +++ b/vendor/golang.org/x/net/html/node.go @@ -0,0 +1,193 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "golang.org/x/net/html/atom" +) + +// A NodeType is the type of a Node. +type NodeType uint32 + +const ( + ErrorNode NodeType = iota + TextNode + DocumentNode + ElementNode + CommentNode + DoctypeNode + scopeMarkerNode +) + +// Section 12.2.3.3 says "scope markers are inserted when entering applet +// elements, buttons, object elements, marquees, table cells, and table +// captions, and are used to prevent formatting from 'leaking'". +var scopeMarker = Node{Type: scopeMarkerNode} + +// A Node consists of a NodeType and some Data (tag name for element nodes, +// content for text) and are part of a tree of Nodes. Element nodes may also +// have a Namespace and contain a slice of Attributes. Data is unescaped, so +// that it looks like "a 0 { + return (*s)[i-1] + } + return nil +} + +// index returns the index of the top-most occurrence of n in the stack, or -1 +// if n is not present. +func (s *nodeStack) index(n *Node) int { + for i := len(*s) - 1; i >= 0; i-- { + if (*s)[i] == n { + return i + } + } + return -1 +} + +// insert inserts a node at the given index. +func (s *nodeStack) insert(i int, n *Node) { + (*s) = append(*s, nil) + copy((*s)[i+1:], (*s)[i:]) + (*s)[i] = n +} + +// remove removes a node from the stack. It is a no-op if n is not present. +func (s *nodeStack) remove(n *Node) { + i := s.index(n) + if i == -1 { + return + } + copy((*s)[i:], (*s)[i+1:]) + j := len(*s) - 1 + (*s)[j] = nil + *s = (*s)[:j] +} diff --git a/vendor/golang.org/x/net/html/node_test.go b/vendor/golang.org/x/net/html/node_test.go new file mode 100644 index 00000000000..471102f3a22 --- /dev/null +++ b/vendor/golang.org/x/net/html/node_test.go @@ -0,0 +1,146 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "fmt" +) + +// checkTreeConsistency checks that a node and its descendants are all +// consistent in their parent/child/sibling relationships. +func checkTreeConsistency(n *Node) error { + return checkTreeConsistency1(n, 0) +} + +func checkTreeConsistency1(n *Node, depth int) error { + if depth == 1e4 { + return fmt.Errorf("html: tree looks like it contains a cycle") + } + if err := checkNodeConsistency(n); err != nil { + return err + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + if err := checkTreeConsistency1(c, depth+1); err != nil { + return err + } + } + return nil +} + +// checkNodeConsistency checks that a node's parent/child/sibling relationships +// are consistent. 
+func checkNodeConsistency(n *Node) error { + if n == nil { + return nil + } + + nParent := 0 + for p := n.Parent; p != nil; p = p.Parent { + nParent++ + if nParent == 1e4 { + return fmt.Errorf("html: parent list looks like an infinite loop") + } + } + + nForward := 0 + for c := n.FirstChild; c != nil; c = c.NextSibling { + nForward++ + if nForward == 1e6 { + return fmt.Errorf("html: forward list of children looks like an infinite loop") + } + if c.Parent != n { + return fmt.Errorf("html: inconsistent child/parent relationship") + } + } + + nBackward := 0 + for c := n.LastChild; c != nil; c = c.PrevSibling { + nBackward++ + if nBackward == 1e6 { + return fmt.Errorf("html: backward list of children looks like an infinite loop") + } + if c.Parent != n { + return fmt.Errorf("html: inconsistent child/parent relationship") + } + } + + if n.Parent != nil { + if n.Parent == n { + return fmt.Errorf("html: inconsistent parent relationship") + } + if n.Parent == n.FirstChild { + return fmt.Errorf("html: inconsistent parent/first relationship") + } + if n.Parent == n.LastChild { + return fmt.Errorf("html: inconsistent parent/last relationship") + } + if n.Parent == n.PrevSibling { + return fmt.Errorf("html: inconsistent parent/prev relationship") + } + if n.Parent == n.NextSibling { + return fmt.Errorf("html: inconsistent parent/next relationship") + } + + parentHasNAsAChild := false + for c := n.Parent.FirstChild; c != nil; c = c.NextSibling { + if c == n { + parentHasNAsAChild = true + break + } + } + if !parentHasNAsAChild { + return fmt.Errorf("html: inconsistent parent/child relationship") + } + } + + if n.PrevSibling != nil && n.PrevSibling.NextSibling != n { + return fmt.Errorf("html: inconsistent prev/next relationship") + } + if n.NextSibling != nil && n.NextSibling.PrevSibling != n { + return fmt.Errorf("html: inconsistent next/prev relationship") + } + + if (n.FirstChild == nil) != (n.LastChild == nil) { + return fmt.Errorf("html: inconsistent first/last relationship") + } + if n.FirstChild != nil && n.FirstChild == n.LastChild { + // We have a sole child. + if n.FirstChild.PrevSibling != nil || n.FirstChild.NextSibling != nil { + return fmt.Errorf("html: inconsistent sole child's sibling relationship") + } + } + + seen := map[*Node]bool{} + + var last *Node + for c := n.FirstChild; c != nil; c = c.NextSibling { + if seen[c] { + return fmt.Errorf("html: inconsistent repeated child") + } + seen[c] = true + last = c + } + if last != n.LastChild { + return fmt.Errorf("html: inconsistent last relationship") + } + + var first *Node + for c := n.LastChild; c != nil; c = c.PrevSibling { + if !seen[c] { + return fmt.Errorf("html: inconsistent missing child") + } + delete(seen, c) + first = c + } + if first != n.FirstChild { + return fmt.Errorf("html: inconsistent first relationship") + } + + if len(seen) != 0 { + return fmt.Errorf("html: inconsistent forwards/backwards child list") + } + + return nil +} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go new file mode 100644 index 00000000000..be4b2bf5aa9 --- /dev/null +++ b/vendor/golang.org/x/net/html/parse.go @@ -0,0 +1,2094 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package html + +import ( + "errors" + "fmt" + "io" + "strings" + + a "golang.org/x/net/html/atom" +) + +// A parser implements the HTML5 parsing algorithm: +// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction +type parser struct { + // tokenizer provides the tokens for the parser. + tokenizer *Tokenizer + // tok is the most recently read token. + tok Token + // Self-closing tags like
are treated as start tags, except that + // hasSelfClosingToken is set while they are being processed. + hasSelfClosingToken bool + // doc is the document root element. + doc *Node + // The stack of open elements (section 12.2.3.2) and active formatting + // elements (section 12.2.3.3). + oe, afe nodeStack + // Element pointers (section 12.2.3.4). + head, form *Node + // Other parsing state flags (section 12.2.3.5). + scripting, framesetOK bool + // im is the current insertion mode. + im insertionMode + // originalIM is the insertion mode to go back to after completing a text + // or inTableText insertion mode. + originalIM insertionMode + // fosterParenting is whether new elements should be inserted according to + // the foster parenting rules (section 12.2.5.3). + fosterParenting bool + // quirks is whether the parser is operating in "quirks mode." + quirks bool + // fragment is whether the parser is parsing an HTML fragment. + fragment bool + // context is the context element when parsing an HTML fragment + // (section 12.4). + context *Node +} + +func (p *parser) top() *Node { + if n := p.oe.top(); n != nil { + return n + } + return p.doc +} + +// Stop tags for use in popUntil. These come from section 12.2.3.2. +var ( + defaultScopeStopTags = map[string][]a.Atom{ + "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, + "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, + "svg": {a.Desc, a.ForeignObject, a.Title}, + } +) + +type scope int + +const ( + defaultScope scope = iota + listItemScope + buttonScope + tableScope + tableRowScope + tableBodyScope + selectScope +) + +// popUntil pops the stack of open elements at the highest element whose tag +// is in matchTags, provided there is no higher element in the scope's stop +// tags (as defined in section 12.2.3.2). It returns whether or not there was +// such an element. If there was not, popUntil leaves the stack unchanged. +// +// For example, the set of stop tags for table scope is: "html", "table". If +// the stack was: +// ["html", "body", "font", "table", "b", "i", "u"] +// then popUntil(tableScope, "font") would return false, but +// popUntil(tableScope, "i") would return true and the stack would become: +// ["html", "body", "font", "table", "b"] +// +// If an element's tag is in both the stop tags and matchTags, then the stack +// will be popped and the function returns true (provided, of course, there was +// no higher element in the stack that was also in the stop tags). For example, +// popUntil(tableScope, "table") returns true and leaves: +// ["html", "body", "font"] +func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { + if i := p.indexOfElementInScope(s, matchTags...); i != -1 { + p.oe = p.oe[:i] + return true + } + return false +} + +// indexOfElementInScope returns the index in p.oe of the highest element whose +// tag is in matchTags that is in scope. If no matching element is in scope, it +// returns -1. +func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + if p.oe[i].Namespace == "" { + for _, t := range matchTags { + if t == tagAtom { + return i + } + } + switch s { + case defaultScope: + // No-op. 
+ case listItemScope: + if tagAtom == a.Ol || tagAtom == a.Ul { + return -1 + } + case buttonScope: + if tagAtom == a.Button { + return -1 + } + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table { + return -1 + } + case selectScope: + if tagAtom != a.Optgroup && tagAtom != a.Option { + return -1 + } + default: + panic("unreachable") + } + } + switch s { + case defaultScope, listItemScope, buttonScope: + for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { + if t == tagAtom { + return -1 + } + } + } + } + return -1 +} + +// elementInScope is like popUntil, except that it doesn't modify the stack of +// open elements. +func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { + return p.indexOfElementInScope(s, matchTags...) != -1 +} + +// clearStackToContext pops elements off the stack of open elements until a +// scope-defined element is found. +func (p *parser) clearStackToContext(s scope) { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + switch s { + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table { + p.oe = p.oe[:i+1] + return + } + case tableRowScope: + if tagAtom == a.Html || tagAtom == a.Tr { + p.oe = p.oe[:i+1] + return + } + case tableBodyScope: + if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead { + p.oe = p.oe[:i+1] + return + } + default: + panic("unreachable") + } + } +} + +// generateImpliedEndTags pops nodes off the stack of open elements as long as +// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt. +// If exceptions are specified, nodes with that name will not be popped off. +func (p *parser) generateImpliedEndTags(exceptions ...string) { + var i int +loop: + for i = len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if n.Type == ElementNode { + switch n.DataAtom { + case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt: + for _, except := range exceptions { + if n.Data == except { + break loop + } + } + continue + } + } + break + } + + p.oe = p.oe[:i+1] +} + +// addChild adds a child node n to the top element, and pushes n onto the stack +// of open elements if it is an element node. +func (p *parser) addChild(n *Node) { + if p.shouldFosterParent() { + p.fosterParent(n) + } else { + p.top().AppendChild(n) + } + + if n.Type == ElementNode { + p.oe = append(p.oe, n) + } +} + +// shouldFosterParent returns whether the next node to be added should be +// foster parented. +func (p *parser) shouldFosterParent() bool { + if p.fosterParenting { + switch p.top().DataAtom { + case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: + return true + } + } + return false +} + +// fosterParent adds a child node according to the foster parenting rules. +// Section 12.2.5.3, "foster parenting". +func (p *parser) fosterParent(n *Node) { + var table, parent, prev *Node + var i int + for i = len(p.oe) - 1; i >= 0; i-- { + if p.oe[i].DataAtom == a.Table { + table = p.oe[i] + break + } + } + + if table == nil { + // The foster parent is the html element. + parent = p.oe[0] + } else { + parent = table.Parent + } + if parent == nil { + parent = p.oe[i-1] + } + + if table != nil { + prev = table.PrevSibling + } else { + prev = parent.LastChild + } + if prev != nil && prev.Type == TextNode && n.Type == TextNode { + prev.Data += n.Data + return + } + + parent.InsertBefore(n, table) +} + +// addText adds text to the preceding node if it is a text node, or else it +// calls addChild with a new text node. 
+func (p *parser) addText(text string) { + if text == "" { + return + } + + if p.shouldFosterParent() { + p.fosterParent(&Node{ + Type: TextNode, + Data: text, + }) + return + } + + t := p.top() + if n := t.LastChild; n != nil && n.Type == TextNode { + n.Data += text + return + } + p.addChild(&Node{ + Type: TextNode, + Data: text, + }) +} + +// addElement adds a child element based on the current token. +func (p *parser) addElement() { + p.addChild(&Node{ + Type: ElementNode, + DataAtom: p.tok.DataAtom, + Data: p.tok.Data, + Attr: p.tok.Attr, + }) +} + +// Section 12.2.3.3. +func (p *parser) addFormattingElement() { + tagAtom, attr := p.tok.DataAtom, p.tok.Attr + p.addElement() + + // Implement the Noah's Ark clause, but with three per family instead of two. + identicalElements := 0 +findIdenticalElements: + for i := len(p.afe) - 1; i >= 0; i-- { + n := p.afe[i] + if n.Type == scopeMarkerNode { + break + } + if n.Type != ElementNode { + continue + } + if n.Namespace != "" { + continue + } + if n.DataAtom != tagAtom { + continue + } + if len(n.Attr) != len(attr) { + continue + } + compareAttributes: + for _, t0 := range n.Attr { + for _, t1 := range attr { + if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { + // Found a match for this attribute, continue with the next attribute. + continue compareAttributes + } + } + // If we get here, there is no attribute that matches a. + // Therefore the element is not identical to the new one. + continue findIdenticalElements + } + + identicalElements++ + if identicalElements >= 3 { + p.afe.remove(n) + } + } + + p.afe = append(p.afe, p.top()) +} + +// Section 12.2.3.3. +func (p *parser) clearActiveFormattingElements() { + for { + n := p.afe.pop() + if len(p.afe) == 0 || n.Type == scopeMarkerNode { + return + } + } +} + +// Section 12.2.3.3. +func (p *parser) reconstructActiveFormattingElements() { + n := p.afe.top() + if n == nil { + return + } + if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { + return + } + i := len(p.afe) - 1 + for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { + if i == 0 { + i = -1 + break + } + i-- + n = p.afe[i] + } + for { + i++ + clone := p.afe[i].clone() + p.addChild(clone) + p.afe[i] = clone + if i == len(p.afe)-1 { + break + } + } +} + +// Section 12.2.4. +func (p *parser) acknowledgeSelfClosingTag() { + p.hasSelfClosingToken = false +} + +// An insertion mode (section 12.2.3.1) is the state transition function from +// a particular state in the HTML5 parser's state machine. It updates the +// parser's fields depending on parser.tok (where ErrorToken means EOF). +// It returns whether the token was consumed. +type insertionMode func(*parser) bool + +// setOriginalIM sets the insertion mode to return to after completing a text or +// inTableText insertion mode. +// Section 12.2.3.1, "using the rules for". +func (p *parser) setOriginalIM() { + if p.originalIM != nil { + panic("html: bad parser state: originalIM was set twice") + } + p.originalIM = p.im +} + +// Section 12.2.3.1, "reset the insertion mode". 
+func (p *parser) resetInsertionMode() { + for i := len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if i == 0 && p.context != nil { + n = p.context + } + + switch n.DataAtom { + case a.Select: + p.im = inSelectIM + case a.Td, a.Th: + p.im = inCellIM + case a.Tr: + p.im = inRowIM + case a.Tbody, a.Thead, a.Tfoot: + p.im = inTableBodyIM + case a.Caption: + p.im = inCaptionIM + case a.Colgroup: + p.im = inColumnGroupIM + case a.Table: + p.im = inTableIM + case a.Head: + p.im = inBodyIM + case a.Body: + p.im = inBodyIM + case a.Frameset: + p.im = inFramesetIM + case a.Html: + p.im = beforeHeadIM + default: + continue + } + return + } + p.im = inBodyIM +} + +const whitespace = " \t\r\n\f" + +// Section 12.2.5.4.1. +func initialIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + n, quirks := parseDoctype(p.tok.Data) + p.doc.AppendChild(n) + p.quirks = quirks + p.im = beforeHTMLIM + return true + } + p.quirks = true + p.im = beforeHTMLIM + return false +} + +// Section 12.2.5.4.2. +func beforeHTMLIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + if p.tok.DataAtom == a.Html { + p.addElement() + p.im = beforeHeadIM + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + } + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false +} + +// Section 12.2.5.4.3. +func beforeHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Head: + p.addElement() + p.head = p.top() + p.im = inHeadIM + return true + case a.Html: + return inBodyIM(p) + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.5.4.4. +func inHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. 
+ p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: + p.addElement() + p.oe.pop() + p.acknowledgeSelfClosingTag() + return true + case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: + p.addElement() + p.setOriginalIM() + p.im = textIM + return true + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head: + n := p.oe.pop() + if n.DataAtom != a.Head { + panic("html: bad parser state: element not found, in the in-head insertion mode") + } + p.im = afterHeadIM + return true + case a.Body, a.Html, a.Br: + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.5.4.6. +func afterHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. + p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Body: + p.addElement() + p.framesetOK = false + p.im = inBodyIM + return true + case a.Frameset: + p.addElement() + p.im = inFramesetIM + return true + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title: + p.oe = append(p.oe, p.head) + defer p.oe.remove(p.head) + return inHeadIM(p) + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Body, a.Html, a.Br: + // Drop down to creating an implied tag. + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) + p.framesetOK = true + return false +} + +// copyAttributes copies attributes of src not found on dst to dst. +func copyAttributes(dst *Node, src Token) { + if len(src.Attr) == 0 { + return + } + attr := map[string]string{} + for _, t := range dst.Attr { + attr[t.Key] = t.Val + } + for _, t := range src.Attr { + if _, ok := attr[t.Key]; !ok { + dst.Attr = append(dst.Attr, t) + attr[t.Key] = t.Val + } + } +} + +// Section 12.2.5.4.7. +func inBodyIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + d := p.tok.Data + switch n := p.oe.top(); n.DataAtom { + case a.Pre, a.Listing: + if n.FirstChild == nil { + // Ignore a newline at the start of a
<pre> block.
+				if d != "" && d[0] == '\r' {
+					d = d[1:]
+				}
+				if d != "" && d[0] == '\n' {
+					d = d[1:]
+				}
+			}
+		}
+		d = strings.Replace(d, "\x00", "", -1)
+		if d == "" {
+			return true
+		}
+		p.reconstructActiveFormattingElements()
+		p.addText(d)
+		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
+			// There were non-whitespace characters inserted.
+			p.framesetOK = false
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			copyAttributes(p.oe[0], p.tok)
+		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
+			return inHeadIM(p)
+		case a.Body:
+			if len(p.oe) >= 2 {
+				body := p.oe[1]
+				if body.Type == ElementNode && body.DataAtom == a.Body {
+					p.framesetOK = false
+					copyAttributes(body, p.tok)
+				}
+			}
+		case a.Frameset:
+			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
+				// Ignore the token.
+				return true
+			}
+			body := p.oe[1]
+			if body.Parent != nil {
+				body.Parent.RemoveChild(body)
+			}
+			p.oe = p.oe[:1]
+			p.addElement()
+			p.im = inFramesetIM
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(buttonScope, a.P)
+			switch n := p.top(); n.DataAtom {
+			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+				p.oe.pop()
+			}
+			p.addElement()
+		case a.Pre, a.Listing:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			// The newline, if any, will be dealt with by the TextToken case.
+			p.framesetOK = false
+		case a.Form:
+			if p.form == nil {
+				p.popUntil(buttonScope, a.P)
+				p.addElement()
+				p.form = p.top()
+			}
+		case a.Li:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Li:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Dd, a.Dt:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Dd, a.Dt:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Plaintext:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Button:
+			p.popUntil(defaultScope, a.Button)
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+		case a.A:
+			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
+				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
+					p.inBodyEndTagFormatting(a.A)
+					p.oe.remove(n)
+					p.afe.remove(n)
+					break
+				}
+			}
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.Nobr:
+			p.reconstructActiveFormattingElements()
+			if p.elementInScope(defaultScope, a.Nobr) {
+				p.inBodyEndTagFormatting(a.Nobr)
+				p.reconstructActiveFormattingElements()
+			}
+			p.addFormattingElement()
+		case a.Applet, a.Marquee, a.Object:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.afe = append(p.afe, &scopeMarker)
+			p.framesetOK = false
+		case a.Table:
+			if !p.quirks {
+				p.popUntil(buttonScope, a.P)
+			}
+			p.addElement()
+			p.framesetOK = false
+			p.im = inTableIM
+			return true
+		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			if p.tok.DataAtom == a.Input {
+				for _, t := range p.tok.Attr {
+					if t.Key == "type" {
+						if strings.ToLower(t.Val) == "hidden" {
+							// Skip setting framesetOK = false
+							return true
+						}
+					}
+				}
+			}
+			p.framesetOK = false
+		case a.Param, a.Source, a.Track:
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+		case a.Hr:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			p.framesetOK = false
+		case a.Image:
+			p.tok.DataAtom = a.Img
+			p.tok.Data = a.Img.String()
+			return false
+		case a.Isindex:
+			if p.form != nil {
+				// Ignore the token.
+				return true
+			}
+			action := ""
+			prompt := "This is a searchable index. Enter search keywords: "
+			attr := []Attribute{{Key: "name", Val: "isindex"}}
+			for _, t := range p.tok.Attr {
+				switch t.Key {
+				case "action":
+					action = t.Val
+				case "name":
+					// Ignore the attribute.
+				case "prompt":
+					prompt = t.Val
+				default:
+					attr = append(attr, t)
+				}
+			}
+			p.acknowledgeSelfClosingTag()
+			p.popUntil(buttonScope, a.P)
+			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
+			if action != "" {
+				p.form.Attr = []Attribute{{Key: "action", Val: action}}
+			}
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
+			p.addText(prompt)
+			p.addChild(&Node{
+				Type:     ElementNode,
+				DataAtom: a.Input,
+				Data:     a.Input.String(),
+				Attr:     attr,
+			})
+			p.oe.pop()
+			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
+		case a.Textarea:
+			p.addElement()
+			p.setOriginalIM()
+			p.framesetOK = false
+			p.im = textIM
+		case a.Xmp:
+			p.popUntil(buttonScope, a.P)
+			p.reconstructActiveFormattingElements()
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Iframe:
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Noembed, a.Noscript:
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Select:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+			p.im = inSelectIM
+			return true
+		case a.Optgroup, a.Option:
+			if p.top().DataAtom == a.Option {
+				p.oe.pop()
+			}
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		case a.Rp, a.Rt:
+			if p.elementInScope(defaultScope, a.Ruby) {
+				p.generateImpliedEndTags()
+			}
+			p.addElement()
+		case a.Math, a.Svg:
+			p.reconstructActiveFormattingElements()
+			if p.tok.DataAtom == a.Math {
+				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+			} else {
+				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+			}
+			adjustForeignAttributes(p.tok.Attr)
+			p.addElement()
+			p.top().Namespace = p.tok.Data
+			if p.hasSelfClosingToken {
+				p.oe.pop()
+				p.acknowledgeSelfClosingTag()
+			}
+			return true
+		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+			// Ignore the token.
+		default:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Body:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.im = afterBodyIM
+			}
+		case a.Html:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
+				return false
+			}
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.Form:
+			node := p.form
+			p.form = nil
+			i := p.indexOfElementInScope(defaultScope, a.Form)
+			if node == nil || i == -1 || p.oe[i] != node {
+				// Ignore the token.
+				return true
+			}
+			p.generateImpliedEndTags()
+			p.oe.remove(node)
+		case a.P:
+			if !p.elementInScope(buttonScope, a.P) {
+				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
+			}
+			p.popUntil(buttonScope, a.P)
+		case a.Li:
+			p.popUntil(listItemScope, a.Li)
+		case a.Dd, a.Dt:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
+		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.inBodyEndTagFormatting(p.tok.DataAtom)
+		case a.Applet, a.Marquee, a.Object:
+			if p.popUntil(defaultScope, p.tok.DataAtom) {
+				p.clearActiveFormattingElements()
+			}
+		case a.Br:
+			p.tok.Type = StartTagToken
+			return false
+		default:
+			p.inBodyEndTagOther(p.tok.DataAtom)
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	}
+
+	return true
+}
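The inBodyIM switch above closes an open paragraph implicitly whenever another block-level start tag arrives (the repeated popUntil(buttonScope, a.P) calls). A minimal usage sketch of how that shows up through the package's public Parse API; the program and its input string are illustrative assumptions, not taken from the vendored sources:

// Illustrative usage sketch (not part of the vendored sources): the second
// <p> start tag implicitly closes the first one via popUntil(buttonScope, a.P),
// so <body> ends up with two sibling <p> elements.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader("<p>one<p>two"))
	if err != nil {
		panic(err)
	}
	body := doc.FirstChild.LastChild // doc -> <html> -> <body> (<head> is its first child)
	for c := body.FirstChild; c != nil; c = c.NextSibling {
		fmt.Println(c.Data, c.FirstChild.Data) // expected: "p one" then "p two"
	}
}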
+
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
+	// This is the "adoption agency" algorithm, described at
+	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
+
+	// TODO: this is a fairly literal line-by-line translation of that algorithm.
+	// Once the code successfully parses the comprehensive test suite, we should
+	// refactor this code to be more idiomatic.
+
+	// Steps 1-4. The outer loop.
+	for i := 0; i < 8; i++ {
+		// Step 5. Find the formatting element.
+		var formattingElement *Node
+		for j := len(p.afe) - 1; j >= 0; j-- {
+			if p.afe[j].Type == scopeMarkerNode {
+				break
+			}
+			if p.afe[j].DataAtom == tagAtom {
+				formattingElement = p.afe[j]
+				break
+			}
+		}
+		if formattingElement == nil {
+			p.inBodyEndTagOther(tagAtom)
+			return
+		}
+		feIndex := p.oe.index(formattingElement)
+		if feIndex == -1 {
+			p.afe.remove(formattingElement)
+			return
+		}
+		if !p.elementInScope(defaultScope, tagAtom) {
+			// Ignore the tag.
+			return
+		}
+
+		// Steps 9-10. Find the furthest block.
+		var furthestBlock *Node
+		for _, e := range p.oe[feIndex:] {
+			if isSpecialElement(e) {
+				furthestBlock = e
+				break
+			}
+		}
+		if furthestBlock == nil {
+			e := p.oe.pop()
+			for e != formattingElement {
+				e = p.oe.pop()
+			}
+			p.afe.remove(e)
+			return
+		}
+
+		// Steps 11-12. Find the common ancestor and bookmark node.
+		commonAncestor := p.oe[feIndex-1]
+		bookmark := p.afe.index(formattingElement)
+
+		// Step 13. The inner loop. Find the lastNode to reparent.
+		lastNode := furthestBlock
+		node := furthestBlock
+		x := p.oe.index(node)
+		// Steps 13.1-13.2
+		for j := 0; j < 3; j++ {
+			// Step 13.3.
+			x--
+			node = p.oe[x]
+			// Step 13.4 - 13.5.
+			if p.afe.index(node) == -1 {
+				p.oe.remove(node)
+				continue
+			}
+			// Step 13.6.
+			if node == formattingElement {
+				break
+			}
+			// Step 13.7.
+			clone := node.clone()
+			p.afe[p.afe.index(node)] = clone
+			p.oe[p.oe.index(node)] = clone
+			node = clone
+			// Step 13.8.
+			if lastNode == furthestBlock {
+				bookmark = p.afe.index(node) + 1
+			}
+			// Step 13.9.
+			if lastNode.Parent != nil {
+				lastNode.Parent.RemoveChild(lastNode)
+			}
+			node.AppendChild(lastNode)
+			// Step 13.10.
+			lastNode = node
+		}
+
+		// Step 14. Reparent lastNode to the common ancestor,
+		// or for misnested table nodes, to the foster parent.
+		if lastNode.Parent != nil {
+			lastNode.Parent.RemoveChild(lastNode)
+		}
+		switch commonAncestor.DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			p.fosterParent(lastNode)
+		default:
+			commonAncestor.AppendChild(lastNode)
+		}
+
+		// Steps 15-17. Reparent nodes from the furthest block's children
+		// to a clone of the formatting element.
+		clone := formattingElement.clone()
+		reparentChildren(clone, furthestBlock)
+		furthestBlock.AppendChild(clone)
+
+		// Step 18. Fix up the list of active formatting elements.
+		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
+			// Move the bookmark with the rest of the list.
+			bookmark--
+		}
+		p.afe.remove(formattingElement)
+		p.afe.insert(bookmark, clone)
+
+		// Step 19. Fix up the stack of open elements.
+		p.oe.remove(formattingElement)
+		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
+	}
+}
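inBodyEndTagFormatting above implements the adoption agency algorithm, which repairs misnested formatting elements by cloning and re-parenting them rather than rejecting the input. A minimal sketch of its observable effect via the public Parse and Render functions; the sample input and the expected shape noted in the comment are illustrative assumptions, not taken from the package's tests:

// Illustrative usage sketch (not part of the vendored sources): </b> arrives
// while <i> is still open, so the adoption agency loop clones and re-parents
// the formatting elements instead of leaving them misnested.
package main

import (
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader("<p>1<b>2<i>3</b>4</i>5</p>"))
	if err != nil {
		panic(err)
	}
	// Re-serialize the repaired tree; the body should come out roughly as
	// <p>1<b>2<i>3</i></b><i>4</i>5</p>, with every element properly nested.
	if err := html.Render(os.Stdout, doc); err != nil {
		panic(err)
	}
}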
+
+// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
+// "Any other end tag" handling from 12.2.5.5 The rules for parsing tokens in foreign content
+// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		if p.oe[i].DataAtom == tagAtom {
+			p.oe = p.oe[:i]
+			break
+		}
+		if isSpecialElement(p.oe[i]) {
+			break
+		}
+	}
+}
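inBodyEndTagOther pops open elements until it finds one matching the stray end tag and gives up at the first "special" element, so an end tag with no matching open element is simply dropped. A small illustrative sketch (the input string is an assumption, not from the package's tests):

// Illustrative usage sketch (not part of the vendored sources): </foo> has no
// matching open element and the walk stops at <body>, a "special" element,
// so the stray end tag is dropped and "a" and "b" stay inside the one <span>.
package main

import (
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader("<span>a</foo>b</span>"))
	if err != nil {
		panic(err)
	}
	if err := html.Render(os.Stdout, doc); err != nil {
		panic(err)
	}
}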
+
+// Section 12.2.5.4.8.
+func textIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		p.oe.pop()
+	case TextToken:
+		d := p.tok.Data
+		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
+			// Ignore a newline at the start of a <textarea> block.
+#errors
+#document
+| 
+|   
+|   
+|     -->
+#errors
+#document
+| 
+|   
+|   
+|     
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+#document
+| 
+|   
+|   
+|     
+#errors
+Line: 1 Col: 9 Unexpected end tag (strong). Expected DOCTYPE.
+Line: 1 Col: 9 Unexpected end tag (strong) after the (implied) root element.
+Line: 1 Col: 13 Unexpected end tag (b) after the (implied) root element.
+Line: 1 Col: 18 Unexpected end tag (em) after the (implied) root element.
+Line: 1 Col: 22 Unexpected end tag (i) after the (implied) root element.
+Line: 1 Col: 26 Unexpected end tag (u) after the (implied) root element.
+Line: 1 Col: 35 Unexpected end tag (strike) after the (implied) root element.
+Line: 1 Col: 39 Unexpected end tag (s) after the (implied) root element.
+Line: 1 Col: 47 Unexpected end tag (blink) after the (implied) root element.
+Line: 1 Col: 52 Unexpected end tag (tt) after the (implied) root element.
+Line: 1 Col: 58 Unexpected end tag (pre) after the (implied) root element.
+Line: 1 Col: 64 Unexpected end tag (big) after the (implied) root element.
+Line: 1 Col: 72 Unexpected end tag (small) after the (implied) root element.
+Line: 1 Col: 79 Unexpected end tag (font) after the (implied) root element.
+Line: 1 Col: 88 Unexpected end tag (select) after the (implied) root element.
+Line: 1 Col: 93 Unexpected end tag (h1) after the (implied) root element.
+Line: 1 Col: 98 Unexpected end tag (h2) after the (implied) root element.
+Line: 1 Col: 103 Unexpected end tag (h3) after the (implied) root element.
+Line: 1 Col: 108 Unexpected end tag (h4) after the (implied) root element.
+Line: 1 Col: 113 Unexpected end tag (h5) after the (implied) root element.
+Line: 1 Col: 118 Unexpected end tag (h6) after the (implied) root element.
+Line: 1 Col: 125 Unexpected end tag (body) after the (implied) root element.
+Line: 1 Col: 130 Unexpected end tag (br). Treated as br element.
+Line: 1 Col: 134 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 140 This element (img) has no end tag.
+Line: 1 Col: 148 Unexpected end tag (title). Ignored.
+Line: 1 Col: 155 Unexpected end tag (span). Ignored.
+Line: 1 Col: 163 Unexpected end tag (style). Ignored.
+Line: 1 Col: 172 Unexpected end tag (script). Ignored.
+Line: 1 Col: 180 Unexpected end tag (table). Ignored.
+Line: 1 Col: 185 Unexpected end tag (th). Ignored.
+Line: 1 Col: 190 Unexpected end tag (td). Ignored.
+Line: 1 Col: 195 Unexpected end tag (tr). Ignored.
+Line: 1 Col: 203 This element (frame) has no end tag.
+Line: 1 Col: 210 This element (area) has no end tag.
+Line: 1 Col: 217 Unexpected end tag (link). Ignored.
+Line: 1 Col: 225 This element (param) has no end tag.
+Line: 1 Col: 230 This element (hr) has no end tag.
+Line: 1 Col: 238 This element (input) has no end tag.
+Line: 1 Col: 244 Unexpected end tag (col). Ignored.
+Line: 1 Col: 251 Unexpected end tag (base). Ignored.
+Line: 1 Col: 258 Unexpected end tag (meta). Ignored.
+Line: 1 Col: 269 This element (basefont) has no end tag.
+Line: 1 Col: 279 This element (bgsound) has no end tag.
+Line: 1 Col: 287 This element (embed) has no end tag.
+Line: 1 Col: 296 This element (spacer) has no end tag.
+Line: 1 Col: 300 Unexpected end tag (p). Ignored.
+Line: 1 Col: 305 End tag (dd) seen too early. Expected other end tag.
+Line: 1 Col: 310 End tag (dt) seen too early. Expected other end tag.
+Line: 1 Col: 320 Unexpected end tag (caption). Ignored.
+Line: 1 Col: 331 Unexpected end tag (colgroup). Ignored.
+Line: 1 Col: 339 Unexpected end tag (tbody). Ignored.
+Line: 1 Col: 347 Unexpected end tag (tfoot). Ignored.
+Line: 1 Col: 355 Unexpected end tag (thead). Ignored.
+Line: 1 Col: 365 End tag (address) seen too early. Expected other end tag.
+Line: 1 Col: 378 End tag (blockquote) seen too early. Expected other end tag.
+Line: 1 Col: 387 End tag (center) seen too early. Expected other end tag.
+Line: 1 Col: 393 Unexpected end tag (dir). Ignored.
+Line: 1 Col: 399 End tag (div) seen too early. Expected other end tag.
+Line: 1 Col: 404 End tag (dl) seen too early. Expected other end tag.
+Line: 1 Col: 415 End tag (fieldset) seen too early. Expected other end tag.
+Line: 1 Col: 425 End tag (listing) seen too early. Expected other end tag.
+Line: 1 Col: 432 End tag (menu) seen too early. Expected other end tag.
+Line: 1 Col: 437 End tag (ol) seen too early. Expected other end tag.
+Line: 1 Col: 442 End tag (ul) seen too early. Expected other end tag.
+Line: 1 Col: 447 End tag (li) seen too early. Expected other end tag.
+Line: 1 Col: 454 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 460 This element (wbr) has no end tag.
+Line: 1 Col: 476 End tag (button) seen too early. Expected other end tag.
+Line: 1 Col: 486 End tag (marquee) seen too early. Expected other end tag.
+Line: 1 Col: 495 End tag (object) seen too early. Expected other end tag.
+Line: 1 Col: 513 Unexpected end tag (html). Ignored.
+Line: 1 Col: 513 Unexpected end tag (frameset). Ignored.
+Line: 1 Col: 520 Unexpected end tag (head). Ignored.
+Line: 1 Col: 529 Unexpected end tag (iframe). Ignored.
+Line: 1 Col: 537 This element (image) has no end tag.
+Line: 1 Col: 547 This element (isindex) has no end tag.
+Line: 1 Col: 557 Unexpected end tag (noembed). Ignored.
+Line: 1 Col: 568 Unexpected end tag (noframes). Ignored.
+Line: 1 Col: 579 Unexpected end tag (noscript). Ignored.
+Line: 1 Col: 590 Unexpected end tag (optgroup). Ignored.
+Line: 1 Col: 599 Unexpected end tag (option). Ignored.
+Line: 1 Col: 611 Unexpected end tag (plaintext). Ignored.
+Line: 1 Col: 622 Unexpected end tag (textarea). Ignored.
+#document
+| 
+|   
+|   
+|     
+|
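The webkit testdata files follow the html5lib .dat layout: each case has a #data section with the input markup, a #errors section listing the expected parse errors, and a #document section giving the expected tree, one node per "|" line, with blank lines separating cases. A hypothetical sketch of splitting such a file into cases (not the package's actual test loader; the file path is an assumption):

// Hypothetical helper (not the package's real test loader): count the
// "#data"-delimited cases in an html5lib-style .dat file.
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	b, err := os.ReadFile("tests1.dat") // illustrative path
	if err != nil {
		panic(err)
	}
	count := 0
	for _, line := range strings.Split(string(b), "\n") {
		if line == "#data" {
			count++
		}
	}
	fmt.Printf("%d test cases\n", count)
}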

+ +#data +

+#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end tag (strong) in table context caused voodoo mode. +Line: 1 Col: 20 End tag (strong) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 24 Unexpected end tag (b) in table context caused voodoo mode. +Line: 1 Col: 24 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 29 Unexpected end tag (em) in table context caused voodoo mode. +Line: 1 Col: 29 End tag (em) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 33 Unexpected end tag (i) in table context caused voodoo mode. +Line: 1 Col: 33 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 37 Unexpected end tag (u) in table context caused voodoo mode. +Line: 1 Col: 37 End tag (u) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 46 Unexpected end tag (strike) in table context caused voodoo mode. +Line: 1 Col: 46 End tag (strike) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 50 Unexpected end tag (s) in table context caused voodoo mode. +Line: 1 Col: 50 End tag (s) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 58 Unexpected end tag (blink) in table context caused voodoo mode. +Line: 1 Col: 58 Unexpected end tag (blink). Ignored. +Line: 1 Col: 63 Unexpected end tag (tt) in table context caused voodoo mode. +Line: 1 Col: 63 End tag (tt) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 69 Unexpected end tag (pre) in table context caused voodoo mode. +Line: 1 Col: 69 End tag (pre) seen too early. Expected other end tag. +Line: 1 Col: 75 Unexpected end tag (big) in table context caused voodoo mode. +Line: 1 Col: 75 End tag (big) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 83 Unexpected end tag (small) in table context caused voodoo mode. +Line: 1 Col: 83 End tag (small) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 90 Unexpected end tag (font) in table context caused voodoo mode. +Line: 1 Col: 90 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 99 Unexpected end tag (select) in table context caused voodoo mode. +Line: 1 Col: 99 Unexpected end tag (select). Ignored. +Line: 1 Col: 104 Unexpected end tag (h1) in table context caused voodoo mode. +Line: 1 Col: 104 End tag (h1) seen too early. Expected other end tag. +Line: 1 Col: 109 Unexpected end tag (h2) in table context caused voodoo mode. +Line: 1 Col: 109 End tag (h2) seen too early. Expected other end tag. +Line: 1 Col: 114 Unexpected end tag (h3) in table context caused voodoo mode. +Line: 1 Col: 114 End tag (h3) seen too early. Expected other end tag. +Line: 1 Col: 119 Unexpected end tag (h4) in table context caused voodoo mode. +Line: 1 Col: 119 End tag (h4) seen too early. Expected other end tag. +Line: 1 Col: 124 Unexpected end tag (h5) in table context caused voodoo mode. +Line: 1 Col: 124 End tag (h5) seen too early. Expected other end tag. +Line: 1 Col: 129 Unexpected end tag (h6) in table context caused voodoo mode. +Line: 1 Col: 129 End tag (h6) seen too early. Expected other end tag. +Line: 1 Col: 136 Unexpected end tag (body) in the table row phase. Ignored. +Line: 1 Col: 141 Unexpected end tag (br) in table context caused voodoo mode. +Line: 1 Col: 141 Unexpected end tag (br). Treated as br element. 
+Line: 1 Col: 145 Unexpected end tag (a) in table context caused voodoo mode. +Line: 1 Col: 145 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 151 Unexpected end tag (img) in table context caused voodoo mode. +Line: 1 Col: 151 This element (img) has no end tag. +Line: 1 Col: 159 Unexpected end tag (title) in table context caused voodoo mode. +Line: 1 Col: 159 Unexpected end tag (title). Ignored. +Line: 1 Col: 166 Unexpected end tag (span) in table context caused voodoo mode. +Line: 1 Col: 166 Unexpected end tag (span). Ignored. +Line: 1 Col: 174 Unexpected end tag (style) in table context caused voodoo mode. +Line: 1 Col: 174 Unexpected end tag (style). Ignored. +Line: 1 Col: 183 Unexpected end tag (script) in table context caused voodoo mode. +Line: 1 Col: 183 Unexpected end tag (script). Ignored. +Line: 1 Col: 196 Unexpected end tag (th). Ignored. +Line: 1 Col: 201 Unexpected end tag (td). Ignored. +Line: 1 Col: 206 Unexpected end tag (tr). Ignored. +Line: 1 Col: 214 This element (frame) has no end tag. +Line: 1 Col: 221 This element (area) has no end tag. +Line: 1 Col: 228 Unexpected end tag (link). Ignored. +Line: 1 Col: 236 This element (param) has no end tag. +Line: 1 Col: 241 This element (hr) has no end tag. +Line: 1 Col: 249 This element (input) has no end tag. +Line: 1 Col: 255 Unexpected end tag (col). Ignored. +Line: 1 Col: 262 Unexpected end tag (base). Ignored. +Line: 1 Col: 269 Unexpected end tag (meta). Ignored. +Line: 1 Col: 280 This element (basefont) has no end tag. +Line: 1 Col: 290 This element (bgsound) has no end tag. +Line: 1 Col: 298 This element (embed) has no end tag. +Line: 1 Col: 307 This element (spacer) has no end tag. +Line: 1 Col: 311 Unexpected end tag (p). Ignored. +Line: 1 Col: 316 End tag (dd) seen too early. Expected other end tag. +Line: 1 Col: 321 End tag (dt) seen too early. Expected other end tag. +Line: 1 Col: 331 Unexpected end tag (caption). Ignored. +Line: 1 Col: 342 Unexpected end tag (colgroup). Ignored. +Line: 1 Col: 350 Unexpected end tag (tbody). Ignored. +Line: 1 Col: 358 Unexpected end tag (tfoot). Ignored. +Line: 1 Col: 366 Unexpected end tag (thead). Ignored. +Line: 1 Col: 376 End tag (address) seen too early. Expected other end tag. +Line: 1 Col: 389 End tag (blockquote) seen too early. Expected other end tag. +Line: 1 Col: 398 End tag (center) seen too early. Expected other end tag. +Line: 1 Col: 404 Unexpected end tag (dir). Ignored. +Line: 1 Col: 410 End tag (div) seen too early. Expected other end tag. +Line: 1 Col: 415 End tag (dl) seen too early. Expected other end tag. +Line: 1 Col: 426 End tag (fieldset) seen too early. Expected other end tag. +Line: 1 Col: 436 End tag (listing) seen too early. Expected other end tag. +Line: 1 Col: 443 End tag (menu) seen too early. Expected other end tag. +Line: 1 Col: 448 End tag (ol) seen too early. Expected other end tag. +Line: 1 Col: 453 End tag (ul) seen too early. Expected other end tag. +Line: 1 Col: 458 End tag (li) seen too early. Expected other end tag. +Line: 1 Col: 465 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 471 This element (wbr) has no end tag. +Line: 1 Col: 487 End tag (button) seen too early. Expected other end tag. +Line: 1 Col: 497 End tag (marquee) seen too early. Expected other end tag. +Line: 1 Col: 506 End tag (object) seen too early. Expected other end tag. +Line: 1 Col: 524 Unexpected end tag (html). Ignored. +Line: 1 Col: 524 Unexpected end tag (frameset). Ignored. 
+Line: 1 Col: 531 Unexpected end tag (head). Ignored. +Line: 1 Col: 540 Unexpected end tag (iframe). Ignored. +Line: 1 Col: 548 This element (image) has no end tag. +Line: 1 Col: 558 This element (isindex) has no end tag. +Line: 1 Col: 568 Unexpected end tag (noembed). Ignored. +Line: 1 Col: 579 Unexpected end tag (noframes). Ignored. +Line: 1 Col: 590 Unexpected end tag (noscript). Ignored. +Line: 1 Col: 601 Unexpected end tag (optgroup). Ignored. +Line: 1 Col: 610 Unexpected end tag (option). Ignored. +Line: 1 Col: 622 Unexpected end tag (plaintext). Ignored. +Line: 1 Col: 633 Unexpected end tag (textarea). Ignored. +#document +| +| +| +|
+| +| +| +|

+ +#data + +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +Line: 1 Col: 10 Expected closing tag. Unexpected end of file. +#document +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat new file mode 100644 index 00000000000..4f8df86f208 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat @@ -0,0 +1,799 @@ +#data + +#errors +#document +| +| +| +| +| + +#data +a +#errors +29: Bogus comment +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| + +#data + +#errors +35: Stray “svg” start tag. +42: Stray end tag “svg” +#document +| +| +| +| +| +#errors +43: Stray “svg” start tag. +50: Stray end tag “svg” +#document +| +| +| +| +|

+#errors +34: Start tag “svg” seen in “table”. +41: Stray end tag “svg”. +#document +| +| +| +| +| +| + +#data +
foo
+#errors +34: Start tag “svg” seen in “table”. +46: Stray end tag “g”. +53: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| + +#data +
foobar
+#errors +34: Start tag “svg” seen in “table”. +46: Stray end tag “g”. +58: Stray end tag “g”. +65: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| + +#data +
foobar
+#errors +41: Start tag “svg” seen in “table”. +53: Stray end tag “g”. +65: Stray end tag “g”. +72: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| +| + +#data +
foobar
+#errors +45: Start tag “svg” seen in “table”. +57: Stray end tag “g”. +69: Stray end tag “g”. +76: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| +| +| + +#data +
foobar
+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" + +#data +
foobar

baz

+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +
foobar

baz

+#errors +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +
foobar

baz

quux +#errors +70: HTML start tag “p” in a foreign namespace context. +81: “table” closed but “caption” was still open. +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" +|

+| "quux" + +#data +
foobarbaz

quux +#errors +78: “table” closed but “caption” was still open. +78: Unclosed elements on stack. +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +| "baz" +|

+| "quux" + +#data +foobar

baz

quux +#errors +44: Start tag “svg” seen in “table”. +56: Stray end tag “g”. +68: Stray end tag “g”. +71: HTML start tag “p” in a foreign namespace context. +71: Start tag “p” seen in “table”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" +| +| +|

+| "quux" + +#data +

quux +#errors +50: Stray “svg” start tag. +54: Stray “g” start tag. +62: Stray end tag “g” +66: Stray “g” start tag. +74: Stray end tag “g” +77: Stray “p” start tag. +88: “table” end tag with “select” open. +#document +| +| +| +| +| +| +| +|
+|

quux +#errors +36: Start tag “select” seen in “table”. +42: Stray “svg” start tag. +46: Stray “g” start tag. +54: Stray end tag “g” +58: Stray “g” start tag. +66: Stray end tag “g” +69: Stray “p” start tag. +80: “table” end tag with “select” open. +#document +| +| +| +| +| +|

+| "quux" + +#data +foobar

baz +#errors +41: Stray “svg” start tag. +68: HTML start tag “p” in a foreign namespace context. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +foobar

baz +#errors +34: Stray “svg” start tag. +61: HTML start tag “p” in a foreign namespace context. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +

+#errors +31: Stray “svg” start tag. +35: Stray “g” start tag. +40: Stray end tag “g” +44: Stray “g” start tag. +49: Stray end tag “g” +52: Stray “p” start tag. +58: Stray “span” start tag. +58: End of file seen and there were open elements. +#document +| +| +| +| + +#data +

+#errors +42: Stray “svg” start tag. +46: Stray “g” start tag. +51: Stray end tag “g” +55: Stray “g” start tag. +60: Stray end tag “g” +63: Stray “p” start tag. +69: Stray “span” start tag. +#document +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| +| xlink href="foo" + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" + +#data +bar +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" +| "bar" + +#data + +#errors +#document +| +| +| +| + +#data +

a +#errors +#document +| +| +| +|
+| +| "a" + +#data +
a +#errors +#document +| +| +| +|
+| +| +| "a" + +#data +
+#errors +#document +| +| +| +|
+| +| +| + +#data +
a +#errors +#document +| +| +| +|
+| +| +| +| +| "a" + +#data +

a +#errors +#document +| +| +| +|

+| +| +| +|

+| "a" + +#data +
    a +#errors +40: HTML start tag “ul” in a foreign namespace context. +41: End of file in a foreign namespace context. +#document +| +| +| +| +| +| +|
    +| +|
      +| "a" + +#data +
        a +#errors +35: HTML start tag “ul” in a foreign namespace context. +36: End of file in a foreign namespace context. +#document +| +| +| +| +| +| +| +|
          +| "a" + +#data +

          +#errors +#document +| +| +| +| +|

          +| +| +|

          + +#data +

          +#errors +#document +| +| +| +| +|

          +| +| +|

          + +#data +

          +#errors +#document +| +| +| +|

          +| +| +| +|

          +|

          + +#data +
          +#errors +#document +| +| +| +| +| +|
          +| +|
          +| +| + +#data +
          +#errors +#document +| +| +| +| +| +| +| +|
          +|
          +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data +

+#errors +#document +| +| +| +| +|
+| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| + +#data +
+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat new file mode 100644 index 00000000000..638cde479f7 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat @@ -0,0 +1,482 @@ +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| 
pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributename="" +| attributetype="" +| basefrequency="" +| baseprofile="" +| calcmode="" +| clippathunits="" +| contentscripttype="" +| contentstyletype="" +| diffuseconstant="" +| edgemode="" +| externalresourcesrequired="" +| filterres="" +| filterunits="" +| glyphref="" +| gradienttransform="" +| gradientunits="" +| kernelmatrix="" +| kernelunitlength="" +| keypoints="" +| keysplines="" +| keytimes="" +| lengthadjust="" +| limitingconeangle="" +| markerheight="" +| markerunits="" +| markerwidth="" +| maskcontentunits="" +| maskunits="" +| numoctaves="" +| pathlength="" +| patterncontentunits="" +| patterntransform="" +| patternunits="" +| pointsatx="" +| pointsaty="" +| pointsatz="" +| preservealpha="" +| preserveaspectratio="" +| primitiveunits="" +| refx="" +| refy="" +| repeatcount="" +| repeatdur="" +| requiredextensions="" +| requiredfeatures="" +| specularconstant="" +| specularexponent="" +| spreadmethod="" +| startoffset="" +| stddeviation="" +| stitchtiles="" +| surfacescale="" +| systemlanguage="" +| tablevalues="" +| targetx="" +| targety="" +| textlength="" +| viewbox="" +| viewtarget="" +| xchannelselector="" +| ychannelselector="" +| zoomandpan="" + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat new file mode 100644 index 00000000000..63107d277b6 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat @@ -0,0 +1,62 @@ +#data +

foobazeggs

spam

quuxbar +#errors +#document +| +| +| +| +|

+| "foo" +| +| +| +| "baz" +| +| +| +| +| "eggs" +| +| +|

+| "spam" +| +| +| +|
+| +| +| "quux" +| "bar" + +#data +foobazeggs

spam
quuxbar +#errors +#document +| +| +| +| +| "foo" +| +| +| +| "baz" +| +| +| +| +| "eggs" +| +| +|

+| "spam" +| +| +| +|
+| +| +| "quux" +| "bar" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat new file mode 100644 index 00000000000..b8713f88582 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat @@ -0,0 +1,74 @@ +#data + +#errors +#document +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +15: Unexpected start tag html +#document +| +| +| abc:def="gh" +| +| +| + +#data + +#errors +15: Unexpected start tag html +#document +| +| +| xml:lang="bar" +| +| + +#data + +#errors +#document +| +| +| 123="456" +| +| + +#data + +#errors +#document +| +| +| 123="456" +| 789="012" +| +| + +#data + +#errors +#document +| +| +| +| +| 789="012" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat new file mode 100644 index 00000000000..6ce1c0d1663 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat @@ -0,0 +1,208 @@ +#data +

X +#errors +Line: 1 Col: 31 Unexpected end tag (p). Ignored. +Line: 1 Col: 36 Expected closing tag. Unexpected end of file. +#document +| +| +| +| +|

+| +| +| +| +| +| +| " " +|

+| "X" + +#data +

+

X +#errors +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end tag (p). Ignored. +Line: 2 Col: 4 Expected closing tag. Unexpected end of file. +#document +| +| +| +|

+| +| +| +| +| +| +| " +" +|

+| "X" + +#data + +#errors +Line: 1 Col: 22 Unexpected end tag (html) after the (implied) root element. +#document +| +| +| +| +| " " + +#data + +#errors +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. +#document +| +| +| +| +| + +#data + +#errors +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end tag (html) after the (implied) root element. +#document +| +| +| +| + +#data +X +#errors +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. +#document +| +| +| +| +| +| "X" + +#data +<!doctype html><table> X<meta></table> +#errors +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 30 Unexpected start tag (meta) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " X" +| <meta> +| <table> + +#data +<!doctype html><table> x</table> +#errors +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x" +| <table> + +#data +<!doctype html><table> x </table> +#errors +Line: 1 Col: 25 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x " +| <table> + +#data +<!doctype html><table><tr> x</table> +#errors +Line: 1 Col: 28 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table>X<style> <tr>x </style> </table> +#errors +Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" +| <table> +| <style> +| " <tr>x " +| " " + +#data +<!doctype html><div><table><a>foo</a> <tr><td>bar</td> </tr></table></div> +#errors +Line: 1 Col: 30 Unexpected start tag (a) in table context caused voodoo mode. +Line: 1 Col: 37 Unexpected end tag (a) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| <a> +| "foo" +| <table> +| " " +| <tbody> +| <tr> +| <td> +| "bar" +| " " + +#data +<frame></frame></frame><frameset><frame><frameset><frame></frameset><noframes></frameset><noframes> +#errors +6: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”. +13: Stray start tag “frame”. +21: Stray end tag “frame”. +29: Stray end tag “frame”. +39: “frameset” start tag after “body” already open. +105: End of file seen inside an [R]CDATA element. +105: End of file seen and there were open elements. +XXX: These errors are wrong, please fix me! +#document +| <html> +| <head> +| <frameset> +| <frame> +| <frameset> +| <frame> +| <noframes> +| "</frameset><noframes>" + +#data +<!DOCTYPE html><object></html> +#errors +1: Expected closing tag. Unexpected end of file +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <object> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat new file mode 100644 index 00000000000..c8ef66f0e6e --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat @@ -0,0 +1,2299 @@ +#data +<!doctype html><script> +#errors +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script>a +#errors +Line: 1 Col: 24 Unexpected end of file. 
Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "a" +| <body> + +#data +<!doctype html><script>< +#errors +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<" +| <body> + +#data +<!doctype html><script></ +#errors +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</" +| <body> + +#data +<!doctype html><script></S +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</S" +| <body> + +#data +<!doctype html><script></SC +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SC" +| <body> + +#data +<!doctype html><script></SCR +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCR" +| <body> + +#data +<!doctype html><script></SCRI +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRI" +| <body> + +#data +<!doctype html><script></SCRIP +#errors +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRIP" +| <body> + +#data +<!doctype html><script></SCRIPT +#errors +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRIPT" +| <body> + +#data +<!doctype html><script></SCRIPT +#errors +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script></s +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</s" +| <body> + +#data +<!doctype html><script></sc +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</sc" +| <body> + +#data +<!doctype html><script></scr +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scr" +| <body> + +#data +<!doctype html><script></scri +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scri" +| <body> + +#data +<!doctype html><script></scrip +#errors +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scrip" +| <body> + +#data +<!doctype html><script></script +#errors +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</script" +| <body> + +#data +<!doctype html><script></script +#errors +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script><! +#errors +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!" +| <body> + +#data +<!doctype html><script><!a +#errors +Line: 1 Col: 26 Unexpected end of file. 
Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!a" +| <body> + +#data +<!doctype html><script><!- +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!-" +| <body> + +#data +<!doctype html><script><!-a +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!-a" +| <body> + +#data +<!doctype html><script><!-- +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<!doctype html><script><!--a +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--a" +| <body> + +#data +<!doctype html><script><!--< +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<" +| <body> + +#data +<!doctype html><script><!--<a +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<a" +| <body> + +#data +<!doctype html><script><!--</ +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--</" +| <body> + +#data +<!doctype html><script><!--</script +#errors +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--</script" +| <body> + +#data +<!doctype html><script><!--</script +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<!doctype html><script><!--<s +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<s" +| <body> + +#data +<!doctype html><script><!--<script +#errors +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script" +| <body> + +#data +<!doctype html><script><!--<script +#errors +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script " +| <body> + +#data +<!doctype html><script><!--<script < +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script <" +| <body> + +#data +<!doctype html><script><!--<script <a +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script <a" +| <body> + +#data +<!doctype html><script><!--<script </ +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </" +| <body> + +#data +<!doctype html><script><!--<script </s +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </s" +| <body> + +#data +<!doctype html><script><!--<script </script +#errors +Line: 1 Col: 43 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script" +| <body> + +#data +<!doctype html><script><!--<script </scripta +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </scripta" +| <body> + +#data +<!doctype html><script><!--<script </script +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script> +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script>" +| <body> + +#data +<!doctype html><script><!--<script </script/ +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script/" +| <body> + +#data +<!doctype html><script><!--<script </script < +#errors +Line: 1 Col: 45 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script <" +| <body> + +#data +<!doctype html><script><!--<script </script <a +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script <a" +| <body> + +#data +<!doctype html><script><!--<script </script </ +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script </" +| <body> + +#data +<!doctype html><script><!--<script </script </script +#errors +Line: 1 Col: 52 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script </script" +| <body> + +#data +<!doctype html><script><!--<script </script </script +#errors +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script </script/ +#errors +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script </script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script - +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -" +| <body> + +#data +<!doctype html><script><!--<script -a +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -a" +| <body> + +#data +<!doctype html><script><!--<script -< +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -<" +| <body> + +#data +<!doctype html><script><!--<script -- +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --" +| <body> + +#data +<!doctype html><script><!--<script --a +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --a" +| <body> + +#data +<!doctype html><script><!--<script --< +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --<" +| <body> + +#data +<!doctype html><script><!--<script --> +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script -->< +#errors +Line: 1 Col: 39 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --><" +| <body> + +#data +<!doctype html><script><!--<script --></ +#errors +Line: 1 Col: 40 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --></" +| <body> + +#data +<!doctype html><script><!--<script --></script +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --></script" +| <body> + +#data +<!doctype html><script><!--<script --></script +#errors +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script --></script/ +#errors +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script --></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script><\/script>--></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script><\/script>-->" +| <body> + +#data +<!doctype html><script><!--<script></scr'+'ipt>--></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt>-->" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>--><!--</script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>--><!--" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>-- ></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>-- >" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>- -></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- ->" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>- - ></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- - >" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>-></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>->" +| <body> + +#data +<!doctype html><script><!--<script>--!></script>X +#errors +Line: 1 Col: 49 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script>--!></script>X" +| <body> + +#data +<!doctype html><script><!--<scr'+'ipt></script>--></script> +#errors +Line: 1 Col: 59 Unexpected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<scr'+'ipt>" +| <body> +| "-->" + +#data +<!doctype html><script><!--<script></scr'+'ipt></script>X +#errors +Line: 1 Col: 57 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt></script>X" +| <body> + +#data +<!doctype html><style><!--<style></style>--></style> +#errors +Line: 1 Col: 52 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--<style>" +| <body> +| "-->" + +#data +<!doctype html><style><!--</style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--" +| <body> +| "X" + +#data +<!doctype html><style><!--...</style>...--></style> +#errors +Line: 1 Col: 51 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--..." 
+| <body> +| "...-->" + +#data +<!doctype html><style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>" +| <body> +| "X" + +#data +<!doctype html><style><!--...<style><!--...--!></style>--></style> +#errors +Line: 1 Col: 66 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--...<style><!--...--!>" +| <body> +| "-->" + +#data +<!doctype html><style><!--...</style><!-- --><style>@import ...</style> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--..." +| <!-- --> +| <style> +| "@import ..." +| <body> + +#data +<!doctype html><style>...<style><!--...</style><!-- --></style> +#errors +Line: 1 Col: 63 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "...<style><!--..." +| <!-- --> +| <body> + +#data +<!doctype html><style>...<!--[if IE]><style>...</style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "...<!--[if IE]><style>..." +| <body> +| "X" + +#data +<!doctype html><title><!--<title>--> +#errors +Line: 1 Col: 52 Unexpected end tag (title). +#document +| +| +| +| +| "<!--<title>" +| <body> +| "-->" + +#data +<!doctype html><title></title> +#errors +#document +| +| +| +| +| "" +| + +#data +foo/title><link></head><body>X +#errors +Line: 1 Col: 52 Unexpected end of file. Expected end tag (title). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <title> +| "foo/title><link></head><body>X" +| <body> + +#data +<!doctype html><noscript><!--<noscript></noscript>--></noscript> +#errors +Line: 1 Col: 64 Unexpected end tag (noscript). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<!--<noscript>" +| <body> +| "-->" + +#data +<!doctype html><noscript><!--</noscript>X<noscript>--></noscript> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<!--" +| <body> +| "X" +| <noscript> +| "-->" + +#data +<!doctype html><noscript><iframe></noscript>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<iframe>" +| <body> +| "X" + +#data +<!doctype html><noframes><!--<noframes></noframes>--></noframes> +#errors +Line: 1 Col: 64 Unexpected end tag (noframes). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noframes> +| "<!--<noframes>" +| <body> +| "-->" + +#data +<!doctype html><noframes><body><script><!--...</script></body></noframes></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noframes> +| "<body><script><!--...</script></body>" +| <body> + +#data +<!doctype html><textarea><!--<textarea></textarea>--></textarea> +#errors +Line: 1 Col: 64 Unexpected end tag (textarea). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "<!--<textarea>" +| "-->" + +#data +<!doctype html><textarea></textarea></textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "</textarea>" + +#data +<!doctype html><textarea><</textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "<" + +#data +<!doctype html><textarea>a<b</textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "a<b" + +#data +<!doctype html><iframe><!--<iframe></iframe>--></iframe> +#errors +Line: 1 Col: 56 Unexpected end tag (iframe). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> +| "<!--<iframe>" +| "-->" + +#data +<!doctype html><iframe>...<!--X->...<!--/X->...</iframe> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> +| "...<!--X->...<!--/X->..." + +#data +<!doctype html><xmp><!--<xmp></xmp>--></xmp> +#errors +Line: 1 Col: 44 Unexpected end tag (xmp). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <xmp> +| "<!--<xmp>" +| "-->" + +#data +<!doctype html><noembed><!--<noembed></noembed>--></noembed> +#errors +Line: 1 Col: 60 Unexpected end tag (noembed). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <noembed> +| "<!--<noembed>" +| "-->" + +#data +<script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 8 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script>a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "a" +| <body> + +#data +<script>< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<" +| <body> + +#data +<script></ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</" +| <body> + +#data +<script></S +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</S" +| <body> + +#data +<script></SC +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SC" +| <body> + +#data +<script></SCR +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCR" +| <body> + +#data +<script></SCRI +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRI" +| <body> + +#data +<script></SCRIP +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRIP" +| <body> + +#data +<script></SCRIPT +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRIPT" +| <body> + +#data +<script></SCRIPT +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script></s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</s" +| <body> + +#data +<script></sc +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. 
+Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</sc" +| <body> + +#data +<script></scr +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scr" +| <body> + +#data +<script></scri +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scri" +| <body> + +#data +<script></scrip +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scrip" +| <body> + +#data +<script></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</script" +| <body> + +#data +<script></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script><! +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!" +| <body> + +#data +<script><!a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!a" +| <body> + +#data +<script><!- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!-" +| <body> + +#data +<script><!-a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!-a" +| <body> + +#data +<script><!-- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<script><!--a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--a" +| <body> + +#data +<script><!--< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<" +| <body> + +#data +<script><!--<a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<a" +| <body> + +#data +<script><!--</ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--</" +| <body> + +#data +<script><!--</script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). 
+#document +| <html> +| <head> +| <script> +| "<!--</script" +| <body> + +#data +<script><!--</script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<script><!--<s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<s" +| <body> + +#data +<script><!--<script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 19 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script" +| <body> + +#data +<script><!--<script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script " +| <body> + +#data +<script><!--<script < +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script <" +| <body> + +#data +<script><!--<script <a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script <a" +| <body> + +#data +<script><!--<script </ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </" +| <body> + +#data +<script><!--<script </s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </s" +| <body> + +#data +<script><!--<script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script" +| <body> + +#data +<script><!--<script </scripta +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </scripta" +| <body> + +#data +<script><!--<script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script>" +| <body> + +#data +<script><!--<script </script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script/" +| <body> + +#data +<script><!--<script </script < +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). 
+#document +| <html> +| <head> +| <script> +| "<!--<script </script <" +| <body> + +#data +<script><!--<script </script <a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script <a" +| <body> + +#data +<script><!--<script </script </ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script </" +| <body> + +#data +<script><!--<script </script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script </script" +| <body> + +#data +<script><!--<script </script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script </script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script </script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script - +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -" +| <body> + +#data +<script><!--<script -a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -a" +| <body> + +#data +<script><!--<script -- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --" +| <body> + +#data +<script><!--<script --a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --a" +| <body> + +#data +<script><!--<script --> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script -->< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --><" +| <body> + +#data +<script><!--<script --></ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --></" +| <body> + +#data +<script><!--<script --></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. 
Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --></script" +| <body> + +#data +<script><!--<script --></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script --></script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script --></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script><\/script>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script><\/script>-->" +| <body> + +#data +<script><!--<script></scr'+'ipt>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt>-->" +| <body> + +#data +<script><!--<script></script><script></script></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>" +| <body> + +#data +<script><!--<script></script><script></script>--><!--</script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>--><!--" +| <body> + +#data +<script><!--<script></script><script></script>-- ></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>-- >" +| <body> + +#data +<script><!--<script></script><script></script>- -></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- ->" +| <body> + +#data +<script><!--<script></script><script></script>- - ></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- - >" +| <body> + +#data +<script><!--<script></script><script></script>-></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>->" +| <body> + +#data +<script><!--<script>--!></script>X +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script>--!></script>X" +| <body> + +#data +<script><!--<scr'+'ipt></script>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 44 Unexpected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<scr'+'ipt>" +| <body> +| "-->" + +#data +<script><!--<script></scr'+'ipt></script>X +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 42 Unexpected end of file. Expected end tag (script). 
+#document +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt></script>X" +| <body> + +#data +<style><!--<style></style>--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--<style>" +| <body> +| "-->" + +#data +<style><!--</style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--" +| <body> +| "X" + +#data +<style><!--...</style>...--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 36 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--..." +| <body> +| "...-->" + +#data +<style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>" +| <body> +| "X" + +#data +<style><!--...<style><!--...--!></style>--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 51 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--...<style><!--...--!>" +| <body> +| "-->" + +#data +<style><!--...</style><!-- --><style>@import ...</style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--..." +| <!-- --> +| <style> +| "@import ..." +| <body> + +#data +<style>...<style><!--...</style><!-- --></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 48 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "...<style><!--..." +| <!-- --> +| <body> + +#data +<style>...<!--[if IE]><style>...</style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "...<!--[if IE]><style>..." +| <body> +| "X" + +#data +<title><!--<title>--> +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end tag (title). +#document +| +| +| +| "<!--<title>" +| <body> +| "-->" + +#data +<title></title> +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +#document +| +| +| +| "" +| + +#data +foo/title><link></head><body>X +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end of file. Expected end tag (title). +#document +| <html> +| <head> +| <title> +| "foo/title><link></head><body>X" +| <body> + +#data +<noscript><!--<noscript></noscript>--></noscript> +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (noscript). +#document +| <html> +| <head> +| <noscript> +| "<!--<noscript>" +| <body> +| "-->" + +#data +<noscript><!--</noscript>X<noscript>--></noscript> +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +#document +| <html> +| <head> +| <noscript> +| "<!--" +| <body> +| "X" +| <noscript> +| "-->" + +#data +<noscript><iframe></noscript>X +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +#document +| <html> +| <head> +| <noscript> +| "<iframe>" +| <body> +| "X" + +#data +<noframes><!--<noframes></noframes>--></noframes> +#errors +Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (noframes). 
+#document +| <html> +| <head> +| <noframes> +| "<!--<noframes>" +| <body> +| "-->" + +#data +<noframes><body><script><!--...</script></body></noframes></html> +#errors +Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE. +#document +| <html> +| <head> +| <noframes> +| "<body><script><!--...</script></body>" +| <body> + +#data +<textarea><!--<textarea></textarea>--></textarea> +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (textarea). +#document +| <html> +| <head> +| <body> +| <textarea> +| "<!--<textarea>" +| "-->" + +#data +<textarea></textarea></textarea> +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| <textarea> +| "</textarea>" + +#data +<iframe><!--<iframe></iframe>--></iframe> +#errors +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE. +Line: 1 Col: 41 Unexpected end tag (iframe). +#document +| <html> +| <head> +| <body> +| <iframe> +| "<!--<iframe>" +| "-->" + +#data +<iframe>...<!--X->...<!--/X->...</iframe> +#errors +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| <iframe> +| "...<!--X->...<!--/X->..." + +#data +<xmp><!--<xmp></xmp>--></xmp> +#errors +Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end tag (xmp). +#document +| <html> +| <head> +| <body> +| <xmp> +| "<!--<xmp>" +| "-->" + +#data +<noembed><!--<noembed></noembed>--></noembed> +#errors +Line: 1 Col: 9 Unexpected start tag (noembed). Expected DOCTYPE. +Line: 1 Col: 45 Unexpected end tag (noembed). +#document +| <html> +| <head> +| <body> +| <noembed> +| "<!--<noembed>" +| "-->" + +#data +<!doctype html><table> + +#errors +Line 2 Col 0 Unexpected end of file. Expected table content. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| " +" + +#data +<!doctype html><table><td><span><font></span><span> +#errors +Line 1 Col 26 Unexpected table cell start tag (td) in the table body phase. +Line 1 Col 45 Unexpected end tag (span). +Line 1 Col 51 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <span> +| <font> +| <font> +| <span> + +#data +<!doctype html><form><table></form><form></table></form> +#errors +35: Stray end tag “form”. +41: Start tag “form” seen in “table”. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <table> +| <form> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat new file mode 100644 index 00000000000..7b555f888de --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat @@ -0,0 +1,153 @@ +#data +<!doctype html><table><tbody><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><tr><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<!doctype html><table><tr><td><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <select> +| <td> + +#data +<!doctype html><table><tr><th><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <th> +| <select> +| <td> + +#data +<!doctype html><table><caption><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <select> +| <tbody> +| <tr> + +#data +<!doctype html><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><th> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><tbody> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><thead> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><tfoot> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><caption> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><table><tr></table>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| "a" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat new file mode 100644 index 00000000000..680e1f068a6 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat @@ -0,0 +1,269 @@ +#data +<!doctype html><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> + +#data +<!doctype html><table><tbody><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> + +#data +<!doctype html><table><tbody><tr><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><tbody><tr><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><td><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| 
<tbody> +| <tr> +| <td> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><caption><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><tr><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <tbody> +| <tr> +| <style> +| "</script>" + +#data +<!doctype html><table><tr><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <tbody> +| <tr> +| <script> +| "</style>" + +#data +<!doctype html><table><caption><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <style> +| "</script>" +| "abc" + +#data +<!doctype html><table><td><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <style> +| "</script>" +| "abc" + +#data +<!doctype html><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" + +#data +<!doctype html><table><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" +| <table> + +#data +<!doctype html><table><tr><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><frameset></frameset><noframes>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" + +#data +<!doctype html><frameset></frameset><noframes>abc</noframes><!--abc--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" +| <!-- abc --> + +#data +<!doctype html><frameset></frameset></html><noframes>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" + +#data +<!doctype html><frameset></frameset></html><noframes>abc</noframes><!--abc--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" +| <!-- abc --> + +#data +<!doctype html><table><tr></tbody><tfoot> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <tfoot> + +#data +<!doctype html><table><td><svg></svg>abc<td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <svg svg> +| "abc" +| <td> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat new file mode 100644 index 00000000000..0d62f5a5b02 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat @@ -0,0 +1,1237 @@ +#data +<!doctype html><math><mn DefinitionUrl="foo"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mn> +| definitionURL="foo" + +#data +<!doctype html><html></p><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <!-- foo --> +| <head> +| <body> + +#data +<!doctype html><head></head></p><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <!-- foo --> +| <body> + +#data +<!doctype html><body><p><pre> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <pre> + 
+#data +<!doctype html><body><p><listing> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <listing> + +#data +<!doctype html><p><plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <plaintext> + +#data +<!doctype html><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <h1> + +#data +<!doctype html><form><isindex> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> + +#data +<!doctype html><isindex action="POST"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| action="POST" +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><isindex prompt="this is isindex"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "this is isindex" +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><isindex type="hidden"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| type="hidden" +| <hr> + +#data +<!doctype html><isindex name="foo"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><ruby><p><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <p> +| <rp> + +#data +<!doctype html><ruby><div><span><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <span> +| <rp> + +#data +<!doctype html><ruby><div><p><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <p> +| <rp> + +#data +<!doctype html><ruby><p><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <p> +| <rt> + +#data +<!doctype html><ruby><div><span><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <span> +| <rt> + +#data +<!doctype html><ruby><div><p><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <p> +| <rt> + +#data +<!doctype html><math/><foo> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <foo> + +#data +<!doctype html><svg/><foo> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <svg svg> +| <foo> + +#data +<!doctype html><div></body><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| <!-- foo --> + +#data +<!doctype html><h1><div><h3><span></h1>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <h1> +| <div> +| <h3> +| <span> +| "foo" + +#data +<!doctype html><p></h3>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "foo" + +#data +<!doctype html><h3><li>abc</h2>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <h3> +| <li> +| "abc" +| "foo" + +#data +<!doctype html><table>abc<!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <!-- foo --> + +#data +<!doctype html><table> <!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| " " +| <!-- foo --> + +#data +<!doctype html><table> b <!--foo--> +#errors +#document +| <!DOCTYPE html> 
+| <html> +| <head> +| <body> +| " b " +| <table> +| <!-- foo --> + +#data +<!doctype html><select><option><option> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> +| <option> + +#data +<!doctype html><select><option></optgroup> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> + +#data +<!doctype html><select><option></optgroup> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> + +#data +<!doctype html><p><math><mi><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mi> +| <p> +| <h1> + +#data +<!doctype html><p><math><mo><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mo> +| <p> +| <h1> + +#data +<!doctype html><p><math><mn><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mn> +| <p> +| <h1> + +#data +<!doctype html><p><math><ms><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math ms> +| <p> +| <h1> + +#data +<!doctype html><p><math><mtext><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mtext> +| <p> +| <h1> + +#data +<!doctype html><frameset></noframes> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html c=d><body></html><html a=b> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <body> + +#data +<!doctype html><html c=d><frameset></frameset></html><html a=b> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <!-- foo --> + +#data +<!doctype html><html><frameset></frameset></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| " " + +#data +<!doctype html><html><frameset></frameset></html>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html><p> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html></p> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<html><frameset></frameset></html><!doctype html> +#errors +#document +| <html> +| <head> +| <frameset> + +#data +<!doctype html><body><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!doctype html><p><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><p>a<frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "a" + +#data +<!doctype html><p> <frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><pre><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <pre> + +#data +<!doctype html><listing><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <listing> + +#data +<!doctype html><li><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <li> + +#data +<!doctype html><dd><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> 
+| <body> +| <dd> + +#data +<!doctype html><dt><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <dt> + +#data +<!doctype html><button><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <button> + +#data +<!doctype html><applet><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <applet> + +#data +<!doctype html><marquee><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <marquee> + +#data +<!doctype html><object><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <object> + +#data +<!doctype html><table><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> + +#data +<!doctype html><area><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <area> + +#data +<!doctype html><basefont><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <basefont> +| <frameset> + +#data +<!doctype html><bgsound><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <bgsound> +| <frameset> + +#data +<!doctype html><br><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <br> + +#data +<!doctype html><embed><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <embed> + +#data +<!doctype html><img><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <img> + +#data +<!doctype html><input><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <input> + +#data +<!doctype html><keygen><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <keygen> + +#data +<!doctype html><wbr><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <wbr> + +#data +<!doctype html><hr><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <hr> + +#data +<!doctype html><textarea></textarea><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> + +#data +<!doctype html><xmp></xmp><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <xmp> + +#data +<!doctype html><iframe></iframe><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> + +#data +<!doctype html><select></select><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><svg></svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><math></math><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><svg><foreignObject><div> <frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><svg>a</svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <svg svg> +| "a" + +#data +<!doctype html><svg> </svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<html>aaa<frameset></frameset> +#errors +#document +| <html> +| <head> +| <body> +| "aaa" + +#data +<html> a <frameset></frameset> +#errors +#document +| <html> +| <head> +| <body> +| "a " + +#data +<!doctype html><div><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + 
+#data +<!doctype html><div><body><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> + +#data +<!doctype html><p><math></p>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| "a" + +#data +<!doctype html><p><math><mn><span></p>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mn> +| <span> +| <p> +| "a" + +#data +<!doctype html><math></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> + +#data +<!doctype html><meta charset="ascii"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <meta> +| charset="ascii" +| <body> + +#data +<!doctype html><meta http-equiv="content-type" content="text/html;charset=ascii"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <meta> +| content="text/html;charset=ascii" +| http-equiv="content-type" +| <body> + +#data +<!doctype html><head><!--aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa--><meta charset="utf8"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <!-- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa --> +| <meta> +| charset="utf8" +| <body> + +#data +<!doctype html><html a=b><head></head><html c=d> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <body> + +#data +<!doctype html><image/> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <img> + +#data +<!doctype html>a<i>b<table>c<b>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "a" +| <i> +| "bc" +| <b> +| "de" +| "f" +| <table> + +#data +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" +| <table> + +#data +<!doctype html><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" + +#data +<!doctype html><table><i>a<b>b<div>c</i> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <i> +| "c" +| <table> + +#data +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" +| <table> + +#data +<!doctype html><table><i>a<div>b<tr>c<b>d</i>e +#errors 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <div> +| "b" +| <i> +| "c" +| <b> +| "d" +| <b> +| "e" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><td><table><i>a<div>b<b>c</i>d +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <i> +| "a" +| <div> +| <i> +| "b" +| <b> +| "c" +| <b> +| "d" +| <table> + +#data +<!doctype html><body><bgsound> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <bgsound> + +#data +<!doctype html><body><basefont> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <basefont> + +#data +<!doctype html><a><b></a><basefont> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <a> +| <b> +| <basefont> + +#data +<!doctype html><a><b></a><bgsound> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <a> +| <b> +| <bgsound> + +#data +<!doctype html><figcaption><article></figcaption>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <figcaption> +| <article> +| "a" + +#data +<!doctype html><summary><article></summary>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <summary> +| <article> +| "a" + +#data +<!doctype html><p><a><plaintext>b +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <a> +| <plaintext> +| <a> +| "b" + +#data +<!DOCTYPE html><div>a<a></div>b<p>c</p>d +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| "a" +| <a> +| <a> +| "b" +| <p> +| "c" +| "d" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat new file mode 100644 index 00000000000..60d85922162 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat @@ -0,0 +1,763 @@ +#data +<!DOCTYPE html>Test +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "Test" + +#data +<textarea>test</div>test +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +Line: 1 Col: 24 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <textarea> +| "test</div>test" + +#data +<table><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. +Line: 1 Col: 11 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<table><td>test</tbody></table> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| "test" + +#data +<frame>test +#errors +Line: 1 Col: 7 Unexpected start tag (frame). Expected DOCTYPE. +Line: 1 Col: 7 Unexpected start tag frame. Ignored. +#document +| <html> +| <head> +| <body> +| "test" + +#data +<!DOCTYPE html><frameset>test +#errors +Line: 1 Col: 29 Unepxected characters in the frameset phase. Characters ignored. +Line: 1 Col: 29 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><frameset><!DOCTYPE html> +#errors +Line: 1 Col: 40 Unexpected DOCTYPE. Ignored. +Line: 1 Col: 40 Expected closing tag. Unexpected end of file. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><font><p><b>test</font> +#errors +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <font> +| <p> +| <font> +| <b> +| "test" + +#data +<!DOCTYPE html><dt><div><dd> +#errors +Line: 1 Col: 28 Missing end tag (div, dt). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <dt> +| <div> +| <dd> + +#data +<script></x +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</x" +| <body> + +#data +<table><plaintext><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 18 Unexpected start tag (plaintext) in table context caused voodoo mode. +Line: 1 Col: 22 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <plaintext> +| "<td>" +| <table> + +#data +<plaintext></plaintext> +#errors +Line: 1 Col: 11 Unexpected start tag (plaintext). Expected DOCTYPE. +Line: 1 Col: 23 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" + +#data +<!DOCTYPE html><table><tr>TEST +#errors +Line: 1 Col: 30 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 30 Unexpected end of file. Expected table content. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "TEST" +| <table> +| <tbody> +| <tr> + +#data +<!DOCTYPE html><body t1=1><body t2=2><body t3=3 t4=4> +#errors +Line: 1 Col: 37 Unexpected start tag (body). +Line: 1 Col: 53 Unexpected start tag (body). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| t1="1" +| t2="2" +| t3="3" +| t4="4" + +#data +</b test +#errors +Line: 1 Col: 8 Unexpected end of file in attribute name. +Line: 1 Col: 8 End tag contains unexpected attributes. +Line: 1 Col: 8 Unexpected end tag (b). Expected DOCTYPE. +Line: 1 Col: 8 Unexpected end tag (b) after the (implied) root element. +#document +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html></b test<b &=&>X +#errors +Line: 1 Col: 32 Named entity didn't end with ';'. +Line: 1 Col: 33 End tag contains unexpected attributes. +Line: 1 Col: 33 Unexpected end tag (b) after the (implied) root element. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" + +#data +<!doctypehtml><scrIPt type=text/x-foobar;baz>X</SCRipt +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +Line: 1 Col: 54 Unexpected end of file in the tag name. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| type="text/x-foobar;baz" +| "X</SCRipt" +| <body> + +#data +& +#errors +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&" + +#data +&# +#errors +Line: 1 Col: 1 Numeric entity expected. Got end of file instead. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#" + +#data +&#X +#errors +Line: 1 Col: 3 Numeric entity expected but none found. +Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#X" + +#data +&#x +#errors +Line: 1 Col: 3 Numeric entity expected but none found. +Line: 1 Col: 3 Unexpected non-space characters. 
Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#x" + +#data +- +#errors +Line: 1 Col: 4 Numeric entity didn't end with ';'. +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "-" + +#data +&x-test +#errors +Line: 1 Col: 1 Named entity expected. Got none. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&x-test" + +#data +<!doctypehtml><p><li> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <li> + +#data +<!doctypehtml><p><dt> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <dt> + +#data +<!doctypehtml><p><dd> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <dd> + +#data +<!doctypehtml><p><form> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +Line: 1 Col: 23 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <form> + +#data +<!DOCTYPE html><p></P>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "X" + +#data +& +#errors +Line: 1 Col: 4 Named entity didn't end with ';'. +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&" + +#data +&AMp; +#errors +Line: 1 Col: 1 Named entity expected. Got none. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&AMp;" + +#data +<!DOCTYPE html><html><head></head><body><thisISasillyTESTelementNameToMakeSureCrazyTagNamesArePARSEDcorrectLY> +#errors +Line: 1 Col: 110 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <thisisasillytestelementnametomakesurecrazytagnamesareparsedcorrectly> + +#data +<!DOCTYPE html>X</body>X +#errors +Line: 1 Col: 24 Unexpected non-space characters in the after body phase. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "XX" + +#data +<!DOCTYPE html><!-- X +#errors +Line: 1 Col: 21 Unexpected end of file in comment. +#document +| <!DOCTYPE html> +| <!-- X --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><table><caption>test TEST</caption><td>test +#errors +Line: 1 Col: 54 Unexpected table cell start tag (td) in the table body phase. +Line: 1 Col: 58 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| "test TEST" +| <tbody> +| <tr> +| <td> +| "test" + +#data +<!DOCTYPE html><select><option><optgroup> +#errors +Line: 1 Col: 41 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> +| <optgroup> + +#data +<!DOCTYPE html><select><optgroup><option></optgroup><option><select><option> +#errors +Line: 1 Col: 68 Unexpected select start tag in the select phase treated as select end tag. +Line: 1 Col: 76 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> +| <option> +| <option> +| <option> + +#data +<!DOCTYPE html><select><optgroup><option><optgroup> +#errors +Line: 1 Col: 51 Expected closing tag. Unexpected end of file. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> +| <option> +| <optgroup> + +#data +<!DOCTYPE html><datalist><option>foo</datalist>bar +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <datalist> +| <option> +| "foo" +| "bar" + +#data +<!DOCTYPE html><font><input><input></font> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <font> +| <input> +| <input> + +#data +<!DOCTYPE html><!-- XXX - XXX --> +#errors +#document +| <!DOCTYPE html> +| <!-- XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><!-- XXX - XXX +#errors +Line: 1 Col: 29 Unexpected end of file in comment (-) +#document +| <!DOCTYPE html> +| <!-- XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><!-- XXX - XXX - XXX --> +#errors +#document +| <!DOCTYPE html> +| <!-- XXX - XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<isindex test=x name=x> +#errors +Line: 1 Col: 23 Unexpected start tag (isindex). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected start tag isindex. Don't use it! +#document +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| test="x" +| <hr> + +#data +test +test +#errors +Line: 2 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "test +test" + +#data +<!DOCTYPE html><body><title>test</body> +#errors +#document +| +| +| +| +| +| "test</body>" + +#data +<!DOCTYPE html><body><title>X +#errors +#document +| +| +| +| +| +| "X" +| <meta> +| name="z" +| <link> +| rel="foo" +| <style> +| " +x { content:"</style" } " + +#data +<!DOCTYPE html><select><optgroup></optgroup></select> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> + +#data + + +#errors +Line: 2 Col: 1 Unexpected End of file. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html> <html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><script> +</script> <title>x +#errors +#document +| +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected start tag (script) that can be in head. Moved. +#document +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +Line: 1 Col: 28 Unexpected start tag (style) that can be in head. Moved. +#document +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +#document +| +| +| +| +| "x" +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (style). +#document +| +| +| --> x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +|

+#errors +#document +| +| +| +| +| +| ddd +#errors +#document +| +| +| +#errors +#document +| +| +| +| +|
    << Back to Go HTTP/2 demo server`) + }) +} + +func httpsHost() string { + if *hostHTTPS != "" { + return *hostHTTPS + } + if v := *httpsAddr; strings.HasPrefix(v, ":") { + return "localhost" + v + } else { + return v + } +} + +func httpHost() string { + if *hostHTTP != "" { + return *hostHTTP + } + if v := *httpAddr; strings.HasPrefix(v, ":") { + return "localhost" + v + } else { + return v + } +} + +func serveProdTLS() error { + c, err := googlestorage.NewServiceClient() + if err != nil { + return err + } + slurp := func(key string) ([]byte, error) { + const bucket = "http2-demo-server-tls" + rc, _, err := c.GetObject(&googlestorage.Object{ + Bucket: bucket, + Key: key, + }) + if err != nil { + return nil, fmt.Errorf("Error fetching GCS object %q in bucket %q: %v", key, bucket, err) + } + defer rc.Close() + return ioutil.ReadAll(rc) + } + certPem, err := slurp("http2.golang.org.chained.pem") + if err != nil { + return err + } + keyPem, err := slurp("http2.golang.org.key") + if err != nil { + return err + } + cert, err := tls.X509KeyPair(certPem, keyPem) + if err != nil { + return err + } + srv := &http.Server{ + TLSConfig: &tls.Config{ + Certificates: []tls.Certificate{cert}, + }, + } + http2.ConfigureServer(srv, &http2.Server{}) + ln, err := net.Listen("tcp", ":443") + if err != nil { + return err + } + return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig)) +} + +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +func serveProd() error { + errc := make(chan error, 2) + go func() { errc <- http.ListenAndServe(":80", nil) }() + go func() { errc <- serveProdTLS() }() + return <-errc +} + +const idleTimeout = 5 * time.Minute +const activeTimeout = 10 * time.Minute + +// TODO: put this into the standard library and actually send +// PING frames and GOAWAY, etc: golang.org/issue/14204 +func idleTimeoutHook() func(net.Conn, http.ConnState) { + var mu sync.Mutex + m := map[net.Conn]*time.Timer{} + return func(c net.Conn, cs http.ConnState) { + mu.Lock() + defer mu.Unlock() + if t, ok := m[c]; ok { + delete(m, c) + t.Stop() + } + var d time.Duration + switch cs { + case http.StateNew, http.StateIdle: + d = idleTimeout + case http.StateActive: + d = activeTimeout + default: + return + } + m[c] = time.AfterFunc(d, func() { + log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d) + go c.Close() + }) + } +} + +func main() { + var srv http.Server + flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.") + flag.Parse() + srv.Addr = *httpsAddr + srv.ConnState = idleTimeoutHook() + + registerHandlers() + + if *prod { + *hostHTTP = "http2.golang.org" + *hostHTTPS = "http2.golang.org" + log.Fatal(serveProd()) + } + + url := "https://" + httpsHost() + "/" + log.Printf("Listening on " + url) + http2.ConfigureServer(&srv, &http2.Server{}) + + if *httpAddr != "" { + go func() { + log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)") + log.Fatal(http.ListenAndServe(*httpAddr, nil)) + }() + } + + go func() { + log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key")) + }() + select {} +} diff --git a/vendor/golang.org/x/net/http2/h2demo/launch.go b/vendor/golang.org/x/net/http2/h2demo/launch.go new file mode 100644 index 00000000000..df0866a3077 --- /dev/null +++ 
b/vendor/golang.org/x/net/http2/h2demo/launch.go @@ -0,0 +1,302 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" +) + +var ( + proj = flag.String("project", "symbolic-datum-552", "name of Project") + zone = flag.String("zone", "us-central1-a", "GCE zone") + mach = flag.String("machinetype", "n1-standard-1", "Machine type") + instName = flag.String("instance_name", "http2-demo", "Name of VM instance.") + sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.") + staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.") + + writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.") + publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.") +) + +func readFile(v string) string { + slurp, err := ioutil.ReadFile(v) + if err != nil { + log.Fatalf("Error reading %s: %v", v, err) + } + return strings.TrimSpace(string(slurp)) +} + +var config = &oauth2.Config{ + // The client-id and secret should be for an "Installed Application" when using + // the CLI. Later we'll use a web application with a callback. + ClientID: readFile("client-id.dat"), + ClientSecret: readFile("client-secret.dat"), + Endpoint: google.Endpoint, + Scopes: []string{ + compute.DevstorageFullControlScope, + compute.ComputeScope, + "https://www.googleapis.com/auth/sqlservice", + "https://www.googleapis.com/auth/sqlservice.admin", + }, + RedirectURL: "urn:ietf:wg:oauth:2.0:oob", +} + +const baseConfig = `#cloud-config +coreos: + units: + - name: h2demo.service + command: start + content: | + [Unit] + Description=HTTP2 Demo + + [Service] + ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo' + ExecStart=/opt/bin/h2demo --prod + RestartSec=5s + Restart=always + Type=simple + + [Install] + WantedBy=multi-user.target +` + +func main() { + flag.Parse() + if *proj == "" { + log.Fatalf("Missing --project flag") + } + prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj + machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach + + const tokenFileName = "token.dat" + tokenFile := tokenCacheFile(tokenFileName) + tokenSource := oauth2.ReuseTokenSource(nil, tokenFile) + token, err := tokenSource.Token() + if err != nil { + if *writeObject != "" { + log.Fatalf("Can't use --write_object without a valid token.dat file already cached.") + } + log.Printf("Error getting token from %s: %v", tokenFileName, err) + log.Printf("Get auth code from %v", config.AuthCodeURL("my-state")) + fmt.Print("\nEnter auth code: ") + sc := bufio.NewScanner(os.Stdin) + sc.Scan() + authCode := strings.TrimSpace(sc.Text()) + token, err = config.Exchange(oauth2.NoContext, authCode) + if err != nil { + log.Fatalf("Error exchanging auth code for a token: %v", err) + } + if err := tokenFile.WriteToken(token); err != nil { + log.Fatalf("Error writing to %s: %v", tokenFileName, 
err) + } + tokenSource = oauth2.ReuseTokenSource(token, nil) + } + + oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource) + + if *writeObject != "" { + writeCloudStorageObject(oauthClient) + return + } + + computeService, _ := compute.New(oauthClient) + + natIP := *staticIP + if natIP == "" { + // Try to find it by name. + aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do() + if err != nil { + log.Fatal(err) + } + // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList + IPLoop: + for _, asl := range aggAddrList.Items { + for _, addr := range asl.Addresses { + if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" { + natIP = addr.Address + break IPLoop + } + } + } + } + + cloudConfig := baseConfig + if *sshPub != "" { + key := strings.TrimSpace(readFile(*sshPub)) + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key) + } + if os.Getenv("USER") == "bradfitz" { + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com") + } + const maxCloudConfig = 32 << 10 // per compute API docs + if len(cloudConfig) > maxCloudConfig { + log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig) + } + + instance := &compute.Instance{ + Name: *instName, + Description: "Go Builder", + MachineType: machType, + Disks: []*compute.AttachedDisk{instanceDisk(computeService)}, + Tags: &compute.Tags{ + Items: []string{"http-server", "https-server"}, + }, + Metadata: &compute.Metadata{ + Items: []*compute.MetadataItems{ + { + Key: "user-data", + Value: &cloudConfig, + }, + }, + }, + NetworkInterfaces: []*compute.NetworkInterface{ + { + AccessConfigs: []*compute.AccessConfig{ + { + Type: "ONE_TO_ONE_NAT", + Name: "External NAT", + NatIP: natIP, + }, + }, + Network: prefix + "/global/networks/default", + }, + }, + ServiceAccounts: []*compute.ServiceAccount{ + { + Email: "default", + Scopes: []string{ + compute.DevstorageFullControlScope, + compute.ComputeScope, + }, + }, + }, + } + + log.Printf("Creating instance...") + op, err := computeService.Instances.Insert(*proj, *zone, instance).Do() + if err != nil { + log.Fatalf("Failed to create instance: %v", err) + } + opName := op.Name + log.Printf("Created. Waiting on operation %v", opName) +OpLoop: + for { + time.Sleep(2 * time.Second) + op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do() + if err != nil { + log.Fatalf("Failed to get op %s: %v", opName, err) + } + switch op.Status { + case "PENDING", "RUNNING": + log.Printf("Waiting on operation %v", opName) + continue + case "DONE": + if op.Error != nil { + for _, operr := range op.Error.Errors { + log.Printf("Error: %+v", operr) + } + log.Fatalf("Failed to start.") + } + log.Printf("Success. 
%+v", op) + break OpLoop + default: + log.Fatalf("Unknown status %q: %+v", op.Status, op) + } + } + + inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do() + if err != nil { + log.Fatalf("Error getting instance after creation: %v", err) + } + ij, _ := json.MarshalIndent(inst, "", " ") + log.Printf("Instance: %s", ij) +} + +func instanceDisk(svc *compute.Service) *compute.AttachedDisk { + const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" + diskName := *instName + "-disk" + + return &compute.AttachedDisk{ + AutoDelete: true, + Boot: true, + Type: "PERSISTENT", + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskName: diskName, + SourceImage: imageURL, + DiskSizeGb: 50, + }, + } +} + +func writeCloudStorageObject(httpClient *http.Client) { + content := os.Stdin + const maxSlurp = 1 << 20 + var buf bytes.Buffer + n, err := io.CopyN(&buf, content, maxSlurp) + if err != nil && err != io.EOF { + log.Fatalf("Error reading from stdin: %v, %v", n, err) + } + contentType := http.DetectContentType(buf.Bytes()) + + req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content)) + if err != nil { + log.Fatal(err) + } + req.Header.Set("x-goog-api-version", "2") + if *publicObject { + req.Header.Set("x-goog-acl", "public-read") + } + req.Header.Set("Content-Type", contentType) + res, err := httpClient.Do(req) + if err != nil { + log.Fatal(err) + } + if res.StatusCode != 200 { + res.Write(os.Stderr) + log.Fatalf("Failed.") + } + log.Printf("Success.") + os.Exit(0) +} + +type tokenCacheFile string + +func (f tokenCacheFile) Token() (*oauth2.Token, error) { + slurp, err := ioutil.ReadFile(string(f)) + if err != nil { + return nil, err + } + t := new(oauth2.Token) + if err := json.Unmarshal(slurp, t); err != nil { + return nil, err + } + return t, nil +} + +func (f tokenCacheFile) WriteToken(t *oauth2.Token) error { + jt, err := json.Marshal(t) + if err != nil { + return err + } + return ioutil.WriteFile(string(f), jt, 0600) +} diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.key b/vendor/golang.org/x/net/http2/h2demo/rootCA.key new file mode 100644 index 00000000000..a15a6abaf78 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q +62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby +XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV +mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ +JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ +SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA +nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e +/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx +qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser +hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j +NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E +LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7 +8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c +0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws +K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd +bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo +QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt 
+Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1 +nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy +b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7 +gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev +WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr +C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj +x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA +hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y +-----END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem new file mode 100644 index 00000000000..3a323e774e2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG +A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 +DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 +NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG +cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv +c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS +R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT +ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk +JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 +mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW +caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G +A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt +hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB +MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES +MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv +bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h +U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao +eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 +UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD +58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n +sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF +kPe6XoSbiLm/kxk32T0= +-----END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl new file mode 100644 index 00000000000..6db3891880e --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl @@ -0,0 +1 @@ +E2CE26BF3285059C diff --git a/vendor/golang.org/x/net/http2/h2demo/server.crt b/vendor/golang.org/x/net/http2/h2demo/server.crt new file mode 100644 index 00000000000..c59059bd640 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/server.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV +UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT +C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW +DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow +RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE +ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l +gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2 +dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL 
+A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws +/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88 +F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB +AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R +rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD +EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19 +KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI +dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU +90p6/CbU71bGbfpM2PHot2fm +-----END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/server.key b/vendor/golang.org/x/net/http2/h2demo/server.key new file mode 100644 index 00000000000..f329c142120 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi +fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm +J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef +b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55 +mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/ +fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p +3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3 +qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4 +NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80 +LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN +a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+ +Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL +W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO +gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm +S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS +Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp +V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4 +KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4 +yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5 +drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e +ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R +48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5 +c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY +nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl +IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd +-----END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2i/README.md b/vendor/golang.org/x/net/http2/h2i/README.md new file mode 100644 index 00000000000..fb5c5efb0f0 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2i/README.md @@ -0,0 +1,97 @@ +# h2i + +**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol' +days of telnetting to your HTTP/1.n servers? We're bringing you +back. 
+ +Features: +- send raw HTTP/2 frames + - PING + - SETTINGS + - HEADERS + - etc +- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2 +- pretty print all received HTTP/2 frames from the peer (including HPACK decoding) +- tab completion of commands, options + +Not yet features, but soon: +- unnecessary CONTINUATION frames on short boundaries, to test peer implementations +- request bodies (DATA frames) +- send invalid frames for testing server implementations (supported by underlying Framer) + +Later: +- act like a server + +## Installation + +``` +$ go get golang.org/x/net/http2/h2i +$ h2i +``` + +## Demo + +``` +$ h2i +Usage: h2i + + -insecure + Whether to skip TLS cert validation + -nextproto string + Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14") + +$ h2i google.com +Connecting to google.com:443 ... +Connected to 74.125.224.41:443 +Negotiated protocol "h2-14" +[FrameHeader SETTINGS len=18] + [MAX_CONCURRENT_STREAMS = 100] + [INITIAL_WINDOW_SIZE = 1048576] + [MAX_FRAME_SIZE = 16384] +[FrameHeader WINDOW_UPDATE len=4] + Window-Increment = 983041 + +h2i> PING h2iSayHI +[FrameHeader PING flags=ACK len=8] + Data = "h2iSayHI" +h2i> headers +(as HTTP/1.1)> GET / HTTP/1.1 +(as HTTP/1.1)> Host: ip.appspot.com +(as HTTP/1.1)> User-Agent: h2i/brad-n-blake +(as HTTP/1.1)> +Opening Stream-ID 1: + :authority = ip.appspot.com + :method = GET + :path = / + :scheme = https + user-agent = h2i/brad-n-blake +[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77] + :status = "200" + alternate-protocol = "443:quic,p=1" + content-length = "15" + content-type = "text/html" + date = "Fri, 01 May 2015 23:06:56 GMT" + server = "Google Frontend" +[FrameHeader DATA flags=END_STREAM stream=1 len=15] + "173.164.155.78\n" +[FrameHeader PING len=8] + Data = "\x00\x00\x00\x00\x00\x00\x00\x00" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader GOAWAY len=22] + Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1) + +ReadFrame: EOF +``` + +## Status + +Quick few hour hack. So much yet to do. Feel free to file issues for +bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/) +and I aren't yet accepting pull requests until things settle down. + diff --git a/vendor/golang.org/x/net/http2/h2i/h2i.go b/vendor/golang.org/x/net/http2/h2i/h2i.go new file mode 100644 index 00000000000..b70976f7754 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2i/h2i.go @@ -0,0 +1,501 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,!solaris + +/* +The h2i command is an interactive HTTP/2 console. 
+ +Usage: + $ h2i [flags] + +Interactive commands in the console: (all parts case-insensitive) + + ping [data] + settings ack + settings FOO=n BAR=z + headers (open a new stream by typing HTTP/1.1) +*/ +package main + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "regexp" + "strconv" + "strings" + + "golang.org/x/crypto/ssh/terminal" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +// Flags +var ( + flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.") + flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation") + flagSettings = flag.String("settings", "empty", "comma-separated list of KEY=value settings for the initial SETTINGS frame. The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.") +) + +type command struct { + run func(*h2i, []string) error // required + + // complete optionally specifies tokens (case-insensitive) which are + // valid for this subcommand. + complete func() []string +} + +var commands = map[string]command{ + "ping": {run: (*h2i).cmdPing}, + "settings": { + run: (*h2i).cmdSettings, + complete: func() []string { + return []string{ + "ACK", + http2.SettingHeaderTableSize.String(), + http2.SettingEnablePush.String(), + http2.SettingMaxConcurrentStreams.String(), + http2.SettingInitialWindowSize.String(), + http2.SettingMaxFrameSize.String(), + http2.SettingMaxHeaderListSize.String(), + } + }, + }, + "quit": {run: (*h2i).cmdQuit}, + "headers": {run: (*h2i).cmdHeaders}, +} + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: h2i \n\n") + flag.PrintDefaults() +} + +// withPort adds ":443" if another port isn't already present. +func withPort(host string) string { + if _, _, err := net.SplitHostPort(host); err != nil { + return net.JoinHostPort(host, "443") + } + return host +} + +// h2i is the app's state. 
+type h2i struct { + host string + tc *tls.Conn + framer *http2.Framer + term *terminal.Terminal + + // owned by the command loop: + streamID uint32 + hbuf bytes.Buffer + henc *hpack.Encoder + + // owned by the readFrames loop: + peerSetting map[http2.SettingID]uint32 + hdec *hpack.Decoder +} + +func main() { + flag.Usage = usage + flag.Parse() + if flag.NArg() != 1 { + usage() + os.Exit(2) + } + log.SetFlags(0) + + host := flag.Arg(0) + app := &h2i{ + host: host, + peerSetting: make(map[http2.SettingID]uint32), + } + app.henc = hpack.NewEncoder(&app.hbuf) + + if err := app.Main(); err != nil { + if app.term != nil { + app.logf("%v\n", err) + } else { + fmt.Fprintf(os.Stderr, "%v\n", err) + } + os.Exit(1) + } + fmt.Fprintf(os.Stdout, "\n") +} + +func (app *h2i) Main() error { + cfg := &tls.Config{ + ServerName: app.host, + NextProtos: strings.Split(*flagNextProto, ","), + InsecureSkipVerify: *flagInsecure, + } + + hostAndPort := withPort(app.host) + log.Printf("Connecting to %s ...", hostAndPort) + tc, err := tls.Dial("tcp", hostAndPort, cfg) + if err != nil { + return fmt.Errorf("Error dialing %s: %v", withPort(app.host), err) + } + log.Printf("Connected to %v", tc.RemoteAddr()) + defer tc.Close() + + if err := tc.Handshake(); err != nil { + return fmt.Errorf("TLS handshake: %v", err) + } + if !*flagInsecure { + if err := tc.VerifyHostname(app.host); err != nil { + return fmt.Errorf("VerifyHostname: %v", err) + } + } + state := tc.ConnectionState() + log.Printf("Negotiated protocol %q", state.NegotiatedProtocol) + if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" { + return fmt.Errorf("Could not negotiate protocol mutually") + } + + if _, err := io.WriteString(tc, http2.ClientPreface); err != nil { + return err + } + + app.framer = http2.NewFramer(tc, tc) + + oldState, err := terminal.MakeRaw(0) + if err != nil { + return err + } + defer terminal.Restore(0, oldState) + + var screen = struct { + io.Reader + io.Writer + }{os.Stdin, os.Stdout} + + app.term = terminal.NewTerminal(screen, "h2i> ") + lastWord := regexp.MustCompile(`.+\W(\w+)$`) + app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) { + if key != '\t' { + return + } + if pos != len(line) { + // TODO: we're being lazy for now, only supporting tab completion at the end. + return + } + // Auto-complete for the command itself. 
+ if !strings.Contains(line, " ") { + var name string + name, _, ok = lookupCommand(line) + if !ok { + return + } + return name, len(name), true + } + _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')]) + if !ok || c.complete == nil { + return + } + if strings.HasSuffix(line, " ") { + app.logf("%s", strings.Join(c.complete(), " ")) + return line, pos, true + } + m := lastWord.FindStringSubmatch(line) + if m == nil { + return line, len(line), true + } + soFar := m[1] + var match []string + for _, cand := range c.complete() { + if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) { + continue + } + match = append(match, cand) + } + if len(match) == 0 { + return + } + if len(match) > 1 { + // TODO: auto-complete any common prefix + app.logf("%s", strings.Join(match, " ")) + return line, pos, true + } + newLine = line[:len(line)-len(soFar)] + match[0] + return newLine, len(newLine), true + + } + + errc := make(chan error, 2) + go func() { errc <- app.readFrames() }() + go func() { errc <- app.readConsole() }() + return <-errc +} + +func (app *h2i) logf(format string, args ...interface{}) { + fmt.Fprintf(app.term, format+"\n", args...) +} + +func (app *h2i) readConsole() error { + if s := *flagSettings; s != "omit" { + var args []string + if s != "empty" { + args = strings.Split(s, ",") + } + _, c, ok := lookupCommand("settings") + if !ok { + panic("settings command not found") + } + c.run(app, args) + } + + for { + line, err := app.term.ReadLine() + if err == io.EOF { + return nil + } + if err != nil { + return fmt.Errorf("terminal.ReadLine: %v", err) + } + f := strings.Fields(line) + if len(f) == 0 { + continue + } + cmd, args := f[0], f[1:] + if _, c, ok := lookupCommand(cmd); ok { + err = c.run(app, args) + } else { + app.logf("Unknown command %q", line) + } + if err == errExitApp { + return nil + } + if err != nil { + return err + } + } +} + +func lookupCommand(prefix string) (name string, c command, ok bool) { + prefix = strings.ToLower(prefix) + if c, ok = commands[prefix]; ok { + return prefix, c, ok + } + + for full, candidate := range commands { + if strings.HasPrefix(full, prefix) { + if c.run != nil { + return "", command{}, false // ambiguous + } + c = candidate + name = full + } + } + return name, c, c.run != nil +} + +var errExitApp = errors.New("internal sentinel error value to quit the console reading loop") + +func (a *h2i) cmdQuit(args []string) error { + if len(args) > 0 { + a.logf("the QUIT command takes no argument") + return nil + } + return errExitApp +} + +func (a *h2i) cmdSettings(args []string) error { + if len(args) == 1 && strings.EqualFold(args[0], "ACK") { + return a.framer.WriteSettingsAck() + } + var settings []http2.Setting + for _, arg := range args { + if strings.EqualFold(arg, "ACK") { + a.logf("Error: ACK must be only argument with the SETTINGS command") + return nil + } + eq := strings.Index(arg, "=") + if eq == -1 { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + sid, ok := settingByName(arg[:eq]) + if !ok { + a.logf("Error: unknown setting name %q", arg[:eq]) + return nil + } + val, err := strconv.ParseUint(arg[eq+1:], 10, 32) + if err != nil { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + settings = append(settings, http2.Setting{ + ID: sid, + Val: uint32(val), + }) + } + a.logf("Sending: %v", settings) + return a.framer.WriteSettings(settings...) 
+} + +func settingByName(name string) (http2.SettingID, bool) { + for _, sid := range [...]http2.SettingID{ + http2.SettingHeaderTableSize, + http2.SettingEnablePush, + http2.SettingMaxConcurrentStreams, + http2.SettingInitialWindowSize, + http2.SettingMaxFrameSize, + http2.SettingMaxHeaderListSize, + } { + if strings.EqualFold(sid.String(), name) { + return sid, true + } + } + return 0, false +} + +func (app *h2i) cmdPing(args []string) error { + if len(args) > 1 { + app.logf("invalid PING usage: only accepts 0 or 1 args") + return nil // nil means don't end the program + } + var data [8]byte + if len(args) == 1 { + copy(data[:], args[0]) + } else { + copy(data[:], "h2i_ping") + } + return app.framer.WritePing(false, data) +} + +func (app *h2i) cmdHeaders(args []string) error { + if len(args) > 0 { + app.logf("Error: HEADERS doesn't yet take arguments.") + // TODO: flags for restricting window size, to force CONTINUATION + // frames. + return nil + } + var h1req bytes.Buffer + app.term.SetPrompt("(as HTTP/1.1)> ") + defer app.term.SetPrompt("h2i> ") + for { + line, err := app.term.ReadLine() + if err != nil { + return err + } + h1req.WriteString(line) + h1req.WriteString("\r\n") + if line == "" { + break + } + } + req, err := http.ReadRequest(bufio.NewReader(&h1req)) + if err != nil { + app.logf("Invalid HTTP/1.1 request: %v", err) + return nil + } + if app.streamID == 0 { + app.streamID = 1 + } else { + app.streamID += 2 + } + app.logf("Opening Stream-ID %d:", app.streamID) + hbf := app.encodeHeaders(req) + if len(hbf) > 16<<10 { + app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go") + return nil + } + return app.framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: app.streamID, + BlockFragment: hbf, + EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now + EndHeaders: true, // for now + }) +} + +func (app *h2i) readFrames() error { + for { + f, err := app.framer.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame: %v", err) + } + app.logf("%v", f) + switch f := f.(type) { + case *http2.PingFrame: + app.logf(" Data = %q", f.Data) + case *http2.SettingsFrame: + f.ForeachSetting(func(s http2.Setting) error { + app.logf(" %v", s) + app.peerSetting[s.ID] = s.Val + return nil + }) + case *http2.WindowUpdateFrame: + app.logf(" Window-Increment = %v\n", f.Increment) + case *http2.GoAwayFrame: + app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)\n", f.LastStreamID, f.ErrCode, f.ErrCode) + case *http2.DataFrame: + app.logf(" %q", f.Data()) + case *http2.HeadersFrame: + if f.HasPriority() { + app.logf(" PRIORITY = %v", f.Priority) + } + if app.hdec == nil { + // TODO: if the user uses h2i to send a SETTINGS frame advertising + // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE + // and stuff here instead of using the 4k default. 
But for now: + tableSize := uint32(4 << 10) + app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) + } + app.hdec.Write(f.HeaderBlockFragment()) + } + } +} + +// called from readLoop +func (app *h2i) onNewHeaderField(f hpack.HeaderField) { + if f.Sensitive { + app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value) + } + app.logf(" %s = %q", f.Name, f.Value) +} + +func (app *h2i) encodeHeaders(req *http.Request) []byte { + app.hbuf.Reset() + + // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go + host := req.Host + if host == "" { + host = req.URL.Host + } + + path := req.URL.Path + if path == "" { + path = "/" + } + + app.writeHeader(":authority", host) // probably not right for all sites + app.writeHeader(":method", req.Method) + app.writeHeader(":path", path) + app.writeHeader(":scheme", "https") + + for k, vv := range req.Header { + lowKey := strings.ToLower(k) + if lowKey == "host" { + continue + } + for _, v := range vv { + app.writeHeader(lowKey, v) + } + } + return app.hbuf.Bytes() +} + +func (app *h2i) writeHeader(name, value string) { + app.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) + app.logf(" %s = %s", name, value) +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go index 014f7896466..c2805f6ac4d 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -57,6 +57,7 @@ func init() { "server", "set-cookie", "strict-transport-security", + "trailer", "transfer-encoding", "user-agent", "vary", diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 80d621cf355..f9bb0339848 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -144,7 +144,7 @@ func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { // shouldIndex reports whether f should be indexed. func (e *Encoder) shouldIndex(f HeaderField) bool { - return !f.Sensitive && f.size() <= e.dynTab.maxSize + return !f.Sensitive && f.Size() <= e.dynTab.maxSize } // appendIndexed appends index i, as encoded in "Indexed Header Field" diff --git a/vendor/golang.org/x/net/http2/hpack/encode_test.go b/vendor/golang.org/x/net/http2/hpack/encode_test.go new file mode 100644 index 00000000000..92286f3bad1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode_test.go @@ -0,0 +1,330 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package hpack + +import ( + "bytes" + "encoding/hex" + "reflect" + "strings" + "testing" +) + +func TestEncoderTableSizeUpdate(t *testing.T) { + tests := []struct { + size1, size2 uint32 + wantHex string + }{ + // Should emit 2 table size updates (2048 and 4096) + {2048, 4096, "3fe10f 3fe11f 82"}, + + // Should emit 1 table size update (2048) + {16384, 2048, "3fe10f 82"}, + } + for _, tt := range tests { + var buf bytes.Buffer + e := NewEncoder(&buf) + e.SetMaxDynamicTableSize(tt.size1) + e.SetMaxDynamicTableSize(tt.size2) + if err := e.WriteField(pair(":method", "GET")); err != nil { + t.Fatal(err) + } + want := removeSpace(tt.wantHex) + if got := hex.EncodeToString(buf.Bytes()); got != want { + t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want) + } + } +} + +func TestEncoderWriteField(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + var got []HeaderField + d := NewDecoder(4<<10, func(f HeaderField) { + got = append(got, f) + }) + + tests := []struct { + hdrs []HeaderField + }{ + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }}, + } + for i, tt := range tests { + buf.Reset() + got = got[:0] + for _, hf := range tt.hdrs { + if err := e.WriteField(hf); err != nil { + t.Fatal(err) + } + } + _, err := d.Write(buf.Bytes()) + if err != nil { + t.Errorf("%d. Decoder Write = %v", i, err) + } + if !reflect.DeepEqual(got, tt.hdrs) { + t.Errorf("%d. 
Decoded %+v; want %+v", i, got, tt.hdrs) + } + } +} + +func TestEncoderSearchTable(t *testing.T) { + e := NewEncoder(nil) + + e.dynTab.add(pair("foo", "bar")) + e.dynTab.add(pair("blake", "miz")) + e.dynTab.add(pair(":method", "GET")) + + tests := []struct { + hf HeaderField + wantI uint64 + wantMatch bool + }{ + // Name and Value match + {pair("foo", "bar"), uint64(len(staticTable) + 3), true}, + {pair("blake", "miz"), uint64(len(staticTable) + 2), true}, + {pair(":method", "GET"), 2, true}, + + // Only name match because Sensitive == true + {HeaderField{":method", "GET", true}, 2, false}, + + // Only Name matches + {pair("foo", "..."), uint64(len(staticTable) + 3), false}, + {pair("blake", "..."), uint64(len(staticTable) + 2), false}, + {pair(":method", "..."), 2, false}, + + // None match + {pair("foo-", "bar"), 0, false}, + } + for _, tt := range tests { + if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { + t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) + } + } +} + +func TestAppendVarInt(t *testing.T) { + tests := []struct { + n byte + i uint64 + want []byte + }{ + // Fits in a byte: + {1, 0, []byte{0}}, + {2, 2, []byte{2}}, + {3, 6, []byte{6}}, + {4, 14, []byte{14}}, + {5, 30, []byte{30}}, + {6, 62, []byte{62}}, + {7, 126, []byte{126}}, + {8, 254, []byte{254}}, + + // Multiple bytes: + {5, 1337, []byte{31, 154, 10}}, + } + for _, tt := range tests { + got := appendVarInt(nil, tt.n, tt.i) + if !bytes.Equal(got, tt.want) { + t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want) + } + } +} + +func TestAppendHpackString(t *testing.T) { + tests := []struct { + s, wantHex string + }{ + // Huffman encoded + {"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, + + // Not Huffman encoded + {"a", "01 61"}, + + // zero length + {"", "00"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendHpackString(nil, tt.s) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want) + } + } +} + +func TestAppendIndexed(t *testing.T) { + tests := []struct { + i uint64 + wantHex string + }{ + // 1 byte + {1, "81"}, + {126, "fe"}, + + // 2 bytes + {127, "ff00"}, + {128, "ff01"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexed(nil, tt.i) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want) + } + } +} + +func TestAppendNewName(t *testing.T) { + tests := []struct { + f HeaderField + indexing bool + wantHex string + }{ + // Incremental indexing + {HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Without indexing + {HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Never indexed + {HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + {HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendNewName(nil, tt.f, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendIndexedName(t *testing.T) { + tests := []struct { + f HeaderField + i uint64 + indexing 
bool + wantHex string + }{ + // Incremental indexing + {HeaderField{":status", "302", false}, 8, true, "48 82 6402"}, + + // Without indexing + {HeaderField{":status", "302", false}, 8, false, "08 82 6402"}, + + // Never indexed + {HeaderField{":status", "302", true}, 8, true, "18 82 6402"}, + {HeaderField{":status", "302", true}, 8, false, "18 82 6402"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendTableSize(t *testing.T) { + tests := []struct { + i uint32 + wantHex string + }{ + // Fits into 1 byte + {30, "3e"}, + + // Extra byte + {31, "3f00"}, + {32, "3f01"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendTableSize(nil, tt.i) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want) + } + } +} + +func TestEncoderSetMaxDynamicTableSize(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + tests := []struct { + v uint32 + wantUpdate bool + wantMinSize uint32 + wantMaxSize uint32 + }{ + // Set new table size to 2048 + {2048, true, 2048, 2048}, + + // Set new table size to 16384, but still limited to + // 4096 + {16384, true, 2048, 4096}, + } + for _, tt := range tests { + e.SetMaxDynamicTableSize(tt.v) + if got := e.tableSizeUpdate; tt.wantUpdate != got { + t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate) + } + if got := e.minSize; tt.wantMinSize != got { + t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize) + } + if got := e.dynTab.maxSize; tt.wantMaxSize != got { + t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize) + } + } +} + +func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) { + e := NewEncoder(nil) + // 4095 < initialHeaderTableSize means maxSize is truncated to + // 4095. + e.SetMaxDynamicTableSizeLimit(4095) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + if got, want := e.maxSizeLimit, uint32(4095); got != want { + t.Errorf("e.maxSizeLimit = %v; want %v", got, want) + } + if got, want := e.tableSizeUpdate, true; got != want { + t.Errorf("e.tableSizeUpdate = %v; want %v", got, want) + } + // maxSize will be truncated to maxSizeLimit + e.SetMaxDynamicTableSize(16384) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + // 8192 > current maxSizeLimit, so maxSize does not change. + e.SetMaxDynamicTableSizeLimit(8192) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + if got, want := e.maxSizeLimit, uint32(8192); got != want { + t.Errorf("e.maxSizeLimit = %v; want %v", got, want) + } +} + +func removeSpace(s string) string { + return strings.Replace(s, " ", "", -1) +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index 8e9b2f2ebf5..8aa197ad678 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -41,7 +41,24 @@ type HeaderField struct { Sensitive bool } -func (hf *HeaderField) size() uint32 { +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. 
+// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7540 section 5.2. +func (hf HeaderField) Size() uint32 { // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of // its entries. The size of an entry is the sum of its name's @@ -102,6 +119,13 @@ func (d *Decoder) SetMaxStringLength(n int) { d.maxStrLen = n } +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + // SetEmitEnabled controls whether the emitFunc provided to NewDecoder // should be called. The default is true. // @@ -156,7 +180,7 @@ func (dt *dynamicTable) setMaxSize(v uint32) { func (dt *dynamicTable) add(f HeaderField) { dt.ents = append(dt.ents, f) - dt.size += f.size() + dt.size += f.Size() dt.evict() } @@ -164,7 +188,7 @@ func (dt *dynamicTable) add(f HeaderField) { func (dt *dynamicTable) evict() { base := dt.ents // keep base pointer of slice for dt.size > dt.maxSize { - dt.size -= dt.ents[0].size() + dt.size -= dt.ents[0].Size() dt.ents = dt.ents[1:] } diff --git a/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/vendor/golang.org/x/net/http2/hpack/hpack_test.go new file mode 100644 index 00000000000..4c7b17bfb14 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack_test.go @@ -0,0 +1,854 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package hpack + +import ( + "bufio" + "bytes" + "encoding/hex" + "fmt" + "math/rand" + "reflect" + "regexp" + "strconv" + "strings" + "testing" + "time" +) + +func TestStaticTable(t *testing.T) { + fromSpec := ` + +-------+-----------------------------+---------------+ + | 1 | :authority | | + | 2 | :method | GET | + | 3 | :method | POST | + | 4 | :path | / | + | 5 | :path | /index.html | + | 6 | :scheme | http | + | 7 | :scheme | https | + | 8 | :status | 200 | + | 9 | :status | 204 | + | 10 | :status | 206 | + | 11 | :status | 304 | + | 12 | :status | 400 | + | 13 | :status | 404 | + | 14 | :status | 500 | + | 15 | accept-charset | | + | 16 | accept-encoding | gzip, deflate | + | 17 | accept-language | | + | 18 | accept-ranges | | + | 19 | accept | | + | 20 | access-control-allow-origin | | + | 21 | age | | + | 22 | allow | | + | 23 | authorization | | + | 24 | cache-control | | + | 25 | content-disposition | | + | 26 | content-encoding | | + | 27 | content-language | | + | 28 | content-length | | + | 29 | content-location | | + | 30 | content-range | | + | 31 | content-type | | + | 32 | cookie | | + | 33 | date | | + | 34 | etag | | + | 35 | expect | | + | 36 | expires | | + | 37 | from | | + | 38 | host | | + | 39 | if-match | | + | 40 | if-modified-since | | + | 41 | if-none-match | | + | 42 | if-range | | + | 43 | if-unmodified-since | | + | 44 | last-modified | | + | 45 | link | | + | 46 | location | | + | 47 | max-forwards | | + | 48 | proxy-authenticate | | + | 49 | proxy-authorization | | + | 50 | range | | + | 51 | referer | | + | 52 | refresh | | + | 53 | retry-after | | + | 54 | server | | + | 55 | set-cookie | | + | 56 | strict-transport-security | | + | 57 | transfer-encoding | | + | 58 | user-agent | | + | 59 | vary | | + | 60 | via | | + | 61 | www-authenticate | | + +-------+-----------------------------+---------------+ +` + bs := bufio.NewScanner(strings.NewReader(fromSpec)) + re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`) + for bs.Scan() { + l := bs.Text() + if !strings.Contains(l, "|") { + continue + } + m := re.FindStringSubmatch(l) + if m == nil { + continue + } + i, err := strconv.Atoi(m[1]) + if err != nil { + t.Errorf("Bogus integer on line %q", l) + continue + } + if i < 1 || i > len(staticTable) { + t.Errorf("Bogus index %d on line %q", i, l) + continue + } + if got, want := staticTable[i-1].Name, m[2]; got != want { + t.Errorf("header index %d name = %q; want %q", i, got, want) + } + if got, want := staticTable[i-1].Value, m[3]; got != want { + t.Errorf("header index %d value = %q; want %q", i, got, want) + } + } + if err := bs.Err(); err != nil { + t.Error(err) + } +} + +func (d *Decoder) mustAt(idx int) HeaderField { + if hf, ok := d.at(uint64(idx)); !ok { + panic(fmt.Sprintf("bogus index %d", idx)) + } else { + return hf + } +} + +func TestDynamicTableAt(t *testing.T) { + d := NewDecoder(4096, nil) + at := d.mustAt + if got, want := at(2), (pair(":method", "GET")); got != want { + t.Errorf("at(2) = %v; want %v", got, want) + } + d.dynTab.add(pair("foo", "bar")) + d.dynTab.add(pair("blake", "miz")) + if got, want := at(len(staticTable)+1), (pair("blake", "miz")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + if got, want := at(len(staticTable)+2), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 2) = %v; want %v", got, want) + } + if got, want := at(3), (pair(":method", "POST")); got != want { + t.Errorf("at(3) = %v; want %v", got, want) + } +} + +func TestDynamicTableSearch(t *testing.T) { + dt := 
dynamicTable{} + dt.setMaxSize(4096) + + dt.add(pair("foo", "bar")) + dt.add(pair("blake", "miz")) + dt.add(pair(":method", "GET")) + + tests := []struct { + hf HeaderField + wantI uint64 + wantMatch bool + }{ + // Name and Value match + {pair("foo", "bar"), 3, true}, + {pair(":method", "GET"), 1, true}, + + // Only name match because of Sensitive == true + {HeaderField{"blake", "miz", true}, 2, false}, + + // Only Name matches + {pair("foo", "..."), 3, false}, + {pair("blake", "..."), 2, false}, + {pair(":method", "..."), 1, false}, + + // None match + {pair("foo-", "bar"), 0, false}, + } + for _, tt := range tests { + if gotI, gotMatch := dt.search(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { + t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) + } + } +} + +func TestDynamicTableSizeEvict(t *testing.T) { + d := NewDecoder(4096, nil) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("size = %d; want %d", d.dynTab.size, want) + } + add := d.dynTab.add + add(pair("blake", "eats pizza")) + if want := uint32(15 + 32); d.dynTab.size != want { + t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want) + } + add(pair("foo", "bar")) + if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want { + t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want) + } + d.dynTab.setMaxSize(15 + 32 + 1 /* slop */) + if want := uint32(6 + 32); d.dynTab.size != want { + t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want) + } + if got, want := d.mustAt(len(staticTable)+1), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + add(pair("long", strings.Repeat("x", 500))) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want) + } +} + +func TestDecoderDecode(t *testing.T) { + tests := []struct { + name string + in []byte + want []HeaderField + wantDynTab []HeaderField // newest entry first + }{ + // C.2.1 Literal Header Field with Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1 + {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"), + []HeaderField{pair("custom-key", "custom-header")}, + []HeaderField{pair("custom-key", "custom-header")}, + }, + + // C.2.2 Literal Header Field without Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2 + {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"), + []HeaderField{pair(":path", "/sample/path")}, + []HeaderField{}}, + + // C.2.3 Literal Header Field never Indexed + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3 + {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"), + []HeaderField{{"password", "secret", true}}, + []HeaderField{}}, + + // C.2.4 Indexed Header Field + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4 + {"C.2.4", []byte("\x82"), + []HeaderField{pair(":method", "GET")}, + []HeaderField{}}, + } + for _, tt := range tests { + d := NewDecoder(4096, nil) + hf, err := d.DecodeFull(tt.in) + if err != nil { + t.Errorf("%s: %v", tt.name, err) + continue + } + if !reflect.DeepEqual(hf, tt.want) { + t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) { + t.Errorf("%s: dynamic table after = %v; want %v", tt.name, gotDynTab, tt.wantDynTab) + } + } +} + +func (dt *dynamicTable) reverseCopy() (hf []HeaderField) { + hf = 
make([]HeaderField, len(dt.ents)) + for i := range hf { + hf[i] = dt.ents[len(dt.ents)-1-i] + } + return +} + +type encAndWant struct { + enc []byte + want []HeaderField + wantDynTab []HeaderField + wantDynSize uint32 +} + +// C.3 Request Examples without Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3 +func TestDecodeC3_NoHuffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5808 6e6f 2d63 6163 6865"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// C.4 Request Examples with Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4 +func TestDecodeC4_Huffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5886 a8eb 1064 9cbf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5 +// "This section shows several consecutive header lists, corresponding +// to HTTP responses, on the same connection. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur." 
+func TestDecodeC5_ResponsesNoHuff(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4803 3330 3258 0770 7269 7661 7465 611d +4d6f 6e2c 2032 3120 4f63 7420 3230 3133 +2032 303a 3133 3a32 3120 474d 546e 1768 +7474 7073 3a2f 2f77 7777 2e65 7861 6d70 +6c65 2e63 6f6d +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4803 3330 37c1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 611d 4d6f 6e2c 2032 3120 4f63 7420 +3230 3133 2032 303a 3133 3a32 3220 474d +54c0 5a04 677a 6970 7738 666f 6f3d 4153 +444a 4b48 514b 425a 584f 5157 454f 5049 +5541 5851 5745 4f49 553b 206d 6178 2d61 +6765 3d33 3630 303b 2076 6572 7369 6f6e +3d31 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6 +// "This section shows the same examples as the previous section, but +// using Huffman encoding for the literal values. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur. The eviction mechanism +// uses the length of the decoded literal values, so the same +// evictions occurs as in the previous section." 
+func TestDecodeC6_ResponsesHuffman(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4882 6402 5885 aec3 771a 4b61 96d0 7abe +9410 54d4 44a8 2005 9504 0b81 66e0 82a6 +2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8 +e9ae 82ae 43d3 +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4883 640e ffc1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 6196 d07a be94 1054 d444 a820 0595 +040b 8166 e084 a62d 1bff c05a 839b d9ab +77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b +3960 d5af 2708 7f36 72c1 ab27 0fb5 291f +9587 3160 65c0 03ed 4ee5 b106 3d50 07 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) { + d := NewDecoder(size, nil) + for i, step := range steps { + hf, err := d.DecodeFull(step.enc) + if err != nil { + t.Fatalf("Error at step index %d: %v", i, err) + } + if !reflect.DeepEqual(hf, step.want) { + t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, step.wantDynTab) { + t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab) + } + if d.dynTab.size != step.wantDynSize { + t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize) + } + } +} + +func TestHuffmanDecodeExcessPadding(t *testing.T) { + tests := [][]byte{ + {0xff}, // Padding Exceeds 7 bits + {0x1f, 0xff}, // {"a", 1 byte excess padding} + {0x1f, 0xff, 0xff}, // {"a", 2 byte excess padding} + {0x1f, 0xff, 0xff, 0xff}, // {"a", 3 byte excess padding} + {0xff, 0x9f, 0xff, 0xff, 0xff}, // {"a", 29 bit excess padding} + {'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol. 
+ } + for i, in := range tests { + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("test-%d: decode(%q) = %v; want ErrInvalidHuffman", i, in, err) + } + } +} + +func TestHuffmanDecodeEOS(t *testing.T) { + in := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, "?"} + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("error = %v; want ErrInvalidHuffman", err) + } +} + +func TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) { + in := []byte{0x00, 0x01} // {"0", "0", "0"} + var buf bytes.Buffer + if err := huffmanDecode(&buf, 2, in); err != ErrStringLength { + t.Errorf("error = %v; want ErrStringLength", err) + } +} + +func TestHuffmanDecodeCorruptPadding(t *testing.T) { + in := []byte{0x00} + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("error = %v; want ErrInvalidHuffman", err) + } +} + +func TestHuffmanDecode(t *testing.T) { + tests := []struct { + inHex, want string + }{ + {"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"}, + {"a8eb 1064 9cbf", "no-cache"}, + {"25a8 49e9 5ba9 7d7f", "custom-key"}, + {"25a8 49e9 5bb8 e8b4 bf", "custom-value"}, + {"6402", "302"}, + {"aec3 771a 4b", "private"}, + {"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"}, + {"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"}, + {"9bd9 ab", "gzip"}, + {"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07", + "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"}, + } + for i, tt := range tests { + var buf bytes.Buffer + in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1)) + if err != nil { + t.Errorf("%d. hex input error: %v", i, err) + continue + } + if _, err := HuffmanDecode(&buf, in); err != nil { + t.Errorf("%d. decode error: %v", i, err) + continue + } + if got := buf.String(); tt.want != got { + t.Errorf("%d. decode = %q; want %q", i, got, tt.want) + } + } +} + +func TestAppendHuffmanString(t *testing.T) { + tests := []struct { + in, want string + }{ + {"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, + {"no-cache", "a8eb 1064 9cbf"}, + {"custom-key", "25a8 49e9 5ba9 7d7f"}, + {"custom-value", "25a8 49e9 5bb8 e8b4 bf"}, + {"302", "6402"}, + {"private", "aec3 771a 4b"}, + {"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"}, + {"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"}, + {"gzip", "9bd9 ab"}, + {"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", + "94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"}, + } + for i, tt := range tests { + buf := []byte{} + want := strings.Replace(tt.want, " ", "", -1) + buf = AppendHuffmanString(buf, tt.in) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("%d. 
encode = %q; want %q", i, got, want) + } + } +} + +func TestHuffmanMaxStrLen(t *testing.T) { + const msg = "Some string" + huff := AppendHuffmanString(nil, msg) + + testGood := func(max int) { + var out bytes.Buffer + if err := huffmanDecode(&out, max, huff); err != nil { + t.Errorf("For maxLen=%d, unexpected error: %v", max, err) + } + if out.String() != msg { + t.Errorf("For maxLen=%d, out = %q; want %q", max, out.String(), msg) + } + } + testGood(0) + testGood(len(msg)) + testGood(len(msg) + 1) + + var out bytes.Buffer + if err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength { + t.Errorf("err = %v; want ErrStringLength", err) + } +} + +func TestHuffmanRoundtripStress(t *testing.T) { + const Len = 50 // of uncompressed string + input := make([]byte, Len) + var output bytes.Buffer + var huff []byte + + n := 5000 + if testing.Short() { + n = 100 + } + seed := time.Now().UnixNano() + t.Logf("Seed = %v", seed) + src := rand.New(rand.NewSource(seed)) + var encSize int64 + for i := 0; i < n; i++ { + for l := range input { + input[l] = byte(src.Intn(256)) + } + huff = AppendHuffmanString(huff[:0], string(input)) + encSize += int64(len(huff)) + output.Reset() + if err := huffmanDecode(&output, 0, huff); err != nil { + t.Errorf("Failed to decode %q -> %q -> error %v", input, huff, err) + continue + } + if !bytes.Equal(output.Bytes(), input) { + t.Errorf("Roundtrip failure on %q -> %q -> %q", input, huff, output.Bytes()) + } + } + t.Logf("Compressed size of original: %0.02f%% (%v -> %v)", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize) +} + +func TestHuffmanDecodeFuzz(t *testing.T) { + const Len = 50 // of compressed + var buf, zbuf bytes.Buffer + + n := 5000 + if testing.Short() { + n = 100 + } + seed := time.Now().UnixNano() + t.Logf("Seed = %v", seed) + src := rand.New(rand.NewSource(seed)) + numFail := 0 + for i := 0; i < n; i++ { + zbuf.Reset() + if i == 0 { + // Start with at least one invalid one. 
+ zbuf.WriteString("00\x91\xff\xff\xff\xff\xc8") + } else { + for l := 0; l < Len; l++ { + zbuf.WriteByte(byte(src.Intn(256))) + } + } + + buf.Reset() + if err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil { + if err == ErrInvalidHuffman { + numFail++ + continue + } + t.Errorf("Failed to decode %q: %v", zbuf.Bytes(), err) + continue + } + } + t.Logf("%0.02f%% are invalid (%d / %d)", 100*float64(numFail)/float64(n), numFail, n) + if numFail < 1 { + t.Error("expected at least one invalid huffman encoding (test starts with one)") + } +} + +func TestReadVarInt(t *testing.T) { + type res struct { + i uint64 + consumed int + err error + } + tests := []struct { + n byte + p []byte + want res + }{ + // Fits in a byte: + {1, []byte{0}, res{0, 1, nil}}, + {2, []byte{2}, res{2, 1, nil}}, + {3, []byte{6}, res{6, 1, nil}}, + {4, []byte{14}, res{14, 1, nil}}, + {5, []byte{30}, res{30, 1, nil}}, + {6, []byte{62}, res{62, 1, nil}}, + {7, []byte{126}, res{126, 1, nil}}, + {8, []byte{254}, res{254, 1, nil}}, + + // Doesn't fit in a byte: + {1, []byte{1}, res{0, 0, errNeedMore}}, + {2, []byte{3}, res{0, 0, errNeedMore}}, + {3, []byte{7}, res{0, 0, errNeedMore}}, + {4, []byte{15}, res{0, 0, errNeedMore}}, + {5, []byte{31}, res{0, 0, errNeedMore}}, + {6, []byte{63}, res{0, 0, errNeedMore}}, + {7, []byte{127}, res{0, 0, errNeedMore}}, + {8, []byte{255}, res{0, 0, errNeedMore}}, + + // Ignoring top bits: + {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111 + {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100 + {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101 + + // Extra byte: + {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte + + // Short a byte: + {5, []byte{191, 154}, res{0, 0, errNeedMore}}, + + // integer overflow: + {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}}, + } + for _, tt := range tests { + i, remain, err := readVarInt(tt.n, tt.p) + consumed := len(tt.p) - len(remain) + got := res{i, consumed, err} + if got != tt.want { + t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want) + } + } +} + +// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56 +func TestHuffmanFuzzCrash(t *testing.T) { + got, err := HuffmanDecodeToString([]byte("00\x91\xff\xff\xff\xff\xc8")) + if got != "" { + t.Errorf("Got %q; want empty string", got) + } + if err != ErrInvalidHuffman { + t.Errorf("Err = %v; want ErrInvalidHuffman", err) + } +} + +func dehex(s string) []byte { + s = strings.Replace(s, " ", "", -1) + s = strings.Replace(s, "\n", "", -1) + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +func TestEmitEnabled(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) + enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) + + numCallback := 0 + var dec *Decoder + dec = NewDecoder(8<<20, func(HeaderField) { + numCallback++ + dec.SetEmitEnabled(false) + }) + if !dec.EmitEnabled() { + t.Errorf("initial emit enabled = false; want true") + } + if _, err := dec.Write(buf.Bytes()); err != nil { + t.Error(err) + } + if numCallback != 1 { + t.Errorf("num callbacks = %d; want 1", numCallback) + } + if dec.EmitEnabled() { + t.Errorf("emit enabled = true; want false") + } +} + +func TestSaveBufLimit(t *testing.T) { + const maxStr = 1 << 10 + var got []HeaderField + dec := NewDecoder(initialHeaderTableSize, 
func(hf HeaderField) { + got = append(got, hf) + }) + dec.SetMaxStringLength(maxStr) + var frag []byte + frag = append(frag[:0], encodeTypeByte(false, false)) + frag = appendVarInt(frag, 7, 3) + frag = append(frag, "foo"...) + frag = appendVarInt(frag, 7, 3) + frag = append(frag, "bar"...) + + if _, err := dec.Write(frag); err != nil { + t.Fatal(err) + } + + want := []HeaderField{{Name: "foo", Value: "bar"}} + if !reflect.DeepEqual(got, want) { + t.Errorf("After small writes, got %v; want %v", got, want) + } + + frag = append(frag[:0], encodeTypeByte(false, false)) + frag = appendVarInt(frag, 7, maxStr*3) + frag = append(frag, make([]byte, maxStr*3)...) + + _, err := dec.Write(frag) + if err != ErrStringLength { + t.Fatalf("Write error = %v; want ErrStringLength", err) + } +} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index eb4b1f05cd0..8850e394677 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -48,12 +48,16 @@ var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") // maxLen bytes will return ErrStringLength. func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { n := rootHuffmanNode - cur, nbits := uint(0), uint8(0) + // cur is the bit buffer that has not been fed into n. + // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. + cur, cbits, sbits := uint(0), uint8(0), uint8(0) for _, b := range v { cur = cur<<8 | uint(b) - nbits += 8 - for nbits >= 8 { - idx := byte(cur >> (nbits - 8)) + cbits += 8 + sbits += 8 + for cbits >= 8 { + idx := byte(cur >> (cbits - 8)) n = n.children[idx] if n == nil { return ErrInvalidHuffman @@ -63,22 +67,40 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { return ErrStringLength } buf.WriteByte(n.sym) - nbits -= n.codeLen + cbits -= n.codeLen n = rootHuffmanNode + sbits = cbits } else { - nbits -= 8 + cbits -= 8 } } } - for nbits > 0 { - n = n.children[byte(cur<<(8-nbits))] - if n.children != nil || n.codeLen > nbits { + for cbits > 0 { + n = n.children[byte(cur<<(8-cbits))] + if n == nil { + return ErrInvalidHuffman + } + if n.children != nil || n.codeLen > cbits { break } + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } buf.WriteByte(n.sym) - nbits -= n.codeLen + cbits -= n.codeLen n = rootHuffmanNode + sbits = cbits + } + if sbits > 7 { + // Either there was an incomplete symbol, or overlong padding. + // Both are decoding errors per RFC 7541 section 5.2. + return ErrInvalidHuffman } + if mask := uint(1<= 127 || ('A' <= r && r <= 'Z') { + if !httplex.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { return false } } @@ -244,3 +280,72 @@ func (w *bufferedWriter) Flush() error { w.bw = nil return err } + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 2616, section 4.4. 
+func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owners, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} diff --git a/vendor/golang.org/x/net/http2/http2_test.go b/vendor/golang.org/x/net/http2/http2_test.go new file mode 100644 index 00000000000..549ff5e4547 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2_test.go @@ -0,0 +1,198 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "errors" + "flag" + "fmt" + "net/http" + "os/exec" + "strconv" + "strings" + "testing" + + "golang.org/x/net/http2/hpack" +) + +var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.") + +func condSkipFailingTest(t *testing.T) { + if !*knownFailing { + t.Skip("Skipping known-failing test without --known_failing") + } +} + +func init() { + DebugGoroutines = true + flag.BoolVar(&VerboseLogs, "verboseh2", false, "Verbose HTTP/2 debug logging") +} + +func TestSettingString(t *testing.T) { + tests := []struct { + s Setting + want string + }{ + {Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"}, + {Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"}, + } + for i, tt := range tests { + got := fmt.Sprint(tt.s) + if got != tt.want { + t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want) + } + } +} + +type twriter struct { + t testing.TB + st *serverTester // optional +} + +func (w twriter) Write(p []byte) (n int, err error) { + if w.st != nil { + ps := string(p) + for _, phrase := range w.st.logFilter { + if strings.Contains(ps, phrase) { + return len(p), nil // no logging + } + } + } + w.t.Logf("%s", p) + return len(p), nil +} + +// like encodeHeader, but don't add implicit pseudo headers. +func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil { + t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } + } + return buf.Bytes() +} + +// Verify that curl has http2. 
+func requireCurl(t *testing.T) { + out, err := dockerLogs(curl(t, "--version")) + if err != nil { + t.Skipf("failed to determine curl features; skipping test") + } + if !strings.Contains(string(out), "HTTP2") { + t.Skip("curl doesn't support HTTP2; skipping test") + } +} + +func curl(t *testing.T, args ...string) (container string) { + out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output() + if err != nil { + t.Skipf("Failed to run curl in docker: %v, %s", err, out) + } + return strings.TrimSpace(string(out)) +} + +// Verify that h2load exists. +func requireH2load(t *testing.T) { + out, err := dockerLogs(h2load(t, "--version")) + if err != nil { + t.Skipf("failed to probe h2load; skipping test: %s", out) + } + if !strings.Contains(string(out), "h2load nghttp2/") { + t.Skipf("h2load not present; skipping test. (Output=%q)", out) + } +} + +func h2load(t *testing.T, args ...string) (container string) { + out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output() + if err != nil { + t.Skipf("Failed to run h2load in docker: %v, %s", err, out) + } + return strings.TrimSpace(string(out)) +} + +type puppetCommand struct { + fn func(w http.ResponseWriter, r *http.Request) + done chan<- bool +} + +type handlerPuppet struct { + ch chan puppetCommand +} + +func newHandlerPuppet() *handlerPuppet { + return &handlerPuppet{ + ch: make(chan puppetCommand), + } +} + +func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) { + for cmd := range p.ch { + cmd.fn(w, r) + cmd.done <- true + } +} + +func (p *handlerPuppet) done() { close(p.ch) } +func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) { + done := make(chan bool) + p.ch <- puppetCommand{fn, done} + <-done +} +func dockerLogs(container string) ([]byte, error) { + out, err := exec.Command("docker", "wait", container).CombinedOutput() + if err != nil { + return out, err + } + exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out))) + if err != nil { + return out, errors.New("unexpected exit status from docker wait") + } + out, err = exec.Command("docker", "logs", container).CombinedOutput() + exec.Command("docker", "rm", container).Run() + if err == nil && exitStatus != 0 { + err = fmt.Errorf("exit status %d: %s", exitStatus, out) + } + return out, err +} + +func kill(container string) { + exec.Command("docker", "kill", container).Run() + exec.Command("docker", "rm", container).Run() +} + +func cleanDate(res *http.Response) { + if d := res.Header["Date"]; len(d) == 1 { + d[0] = "XXX" + } +} + +func TestSorterPoolAllocs(t *testing.T) { + ss := []string{"a", "b", "c"} + h := http.Header{ + "a": nil, + "b": nil, + "c": nil, + } + sorter := new(sorter) + + if allocs := testing.AllocsPerRun(100, func() { + sorter.SortStrings(ss) + }); allocs >= 1 { + t.Logf("SortStrings allocs = %v; want <1", allocs) + } + + if allocs := testing.AllocsPerRun(5, func() { + if len(sorter.Keys(h)) != 3 { + t.Fatal("wrong result") + } + }); allocs > 0 { + t.Logf("Keys allocs = %v; want <1", allocs) + } +} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go new file mode 100644 index 00000000000..efd2e1282cf --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go16.go @@ -0,0 +1,46 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.6 + +package http2 + +import ( + "crypto/tls" + "net/http" + "time" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return 0 + +} + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +func isBadCipher(cipher uint16) bool { + switch cipher { + case tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + // Reject cipher suites from Appendix A. + // "This list includes those cipher suites that do not + // offer an ephemeral key exchange and those that are + // based on the TLS null, stream or block cipher type" + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go new file mode 100644 index 00000000000..28df0c16bf0 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -0,0 +1,51 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package http2 + +import ( + "net" + "net/http" +) + +type contextContext interface{} + +type fakeContext struct{} + +func (fakeContext) Done() <-chan struct{} { return nil } +func (fakeContext) Err() error { panic("should not be called") } + +func reqContext(r *http.Request) fakeContext { + return fakeContext{} +} + +func setResponseUncompressed(res *http.Response) { + // Nothing. +} + +type clientTrace struct{} + +func requestTrace(*http.Request) *clientTrace { return nil } +func traceGotConn(*http.Request, *ClientConn) {} +func traceFirstResponseByte(*clientTrace) {} +func traceWroteHeaders(*clientTrace) {} +func traceWroteRequest(*clientTrace, error) {} +func traceGot100Continue(trace *clientTrace) {} +func traceWait100Continue(trace *clientTrace) {} + +func nop() {} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + return nil, nop +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return ctx, nop +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 72a1fdc9367..53b7a1daf63 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -14,10 +14,13 @@ import ( // io.Pipe except there are no PipeReader/PipeWriter halves, and the // underlying buffer is an interface. (io.Pipe is always unbuffered) type pipe struct { - mu sync.Mutex - c sync.Cond // c.L must point to - b pipeBuffer - err error // read error once empty. non-nil means closed. + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer + err error // read error once empty. non-nil means closed. 
+ breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error } type pipeBuffer interface { @@ -26,6 +29,12 @@ type pipeBuffer interface { io.Reader } +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + return p.b.Len() +} + // Read waits until data is available and copies bytes // from the buffer into p. func (p *pipe) Read(d []byte) (n int, err error) { @@ -35,10 +44,17 @@ func (p *pipe) Read(d []byte) (n int, err error) { p.c.L = &p.mu } for { + if p.breakErr != nil { + return 0, p.breakErr + } if p.b.Len() > 0 { return p.b.Read(d) } if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } return 0, p.err } p.c.Wait() @@ -62,13 +78,25 @@ func (p *pipe) Write(d []byte) (n int, err error) { return p.b.Write(d) } -// CloseWithError causes Reads to wake up and return the -// provided err after all data has been read. +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. // // The error must be non-nil. -func (p *pipe) CloseWithError(err error) { +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { if err == nil { - panic("CloseWithError must be non-nil") + panic("err must be non-nil") } p.mu.Lock() defer p.mu.Unlock() @@ -76,7 +104,50 @@ func (p *pipe) CloseWithError(err error) { p.c.L = &p.mu } defer p.c.Signal() - if p.err == nil { - p.err = err + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } } + return p.donec } diff --git a/vendor/golang.org/x/net/http2/pipe_test.go b/vendor/golang.org/x/net/http2/pipe_test.go new file mode 100644 index 00000000000..76322999965 --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe_test.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http2 + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "testing" +) + +func TestPipeClose(t *testing.T) { + var p pipe + p.b = new(bytes.Buffer) + a := errors.New("a") + b := errors.New("b") + p.CloseWithError(a) + p.CloseWithError(b) + _, err := p.Read(make([]byte, 1)) + if err != a { + t.Errorf("err = %v want %v", err, a) + } +} + +func TestPipeDoneChan(t *testing.T) { + var p pipe + done := p.Done() + select { + case <-done: + t.Fatal("done too soon") + default: + } + p.CloseWithError(io.EOF) + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_ErrFirst(t *testing.T) { + var p pipe + p.CloseWithError(io.EOF) + done := p.Done() + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_Break(t *testing.T) { + var p pipe + done := p.Done() + select { + case <-done: + t.Fatal("done too soon") + default: + } + p.BreakWithError(io.EOF) + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_Break_ErrFirst(t *testing.T) { + var p pipe + p.BreakWithError(io.EOF) + done := p.Done() + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeCloseWithError(t *testing.T) { + p := &pipe{b: new(bytes.Buffer)} + const body = "foo" + io.WriteString(p, body) + a := errors.New("test error") + p.CloseWithError(a) + all, err := ioutil.ReadAll(p) + if string(all) != body { + t.Errorf("read bytes = %q; want %q", all, body) + } + if err != a { + t.Logf("read error = %v, %v", err, a) + } +} + +func TestPipeBreakWithError(t *testing.T) { + p := &pipe{b: new(bytes.Buffer)} + io.WriteString(p, "foo") + a := errors.New("test err") + p.BreakWithError(a) + all, err := ioutil.ReadAll(p) + if string(all) != "" { + t.Errorf("read bytes = %q; want empty string", all) + } + if err != a { + t.Logf("read error = %v, %v", err, a) + } +} diff --git a/vendor/golang.org/x/net/http2/priority_test.go b/vendor/golang.org/x/net/http2/priority_test.go new file mode 100644 index 00000000000..a3fe2bb4901 --- /dev/null +++ b/vendor/golang.org/x/net/http2/priority_test.go @@ -0,0 +1,118 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "testing" +) + +func TestPriority(t *testing.T) { + // A -> B + // move A's parent to B + streams := make(map[uint32]*stream) + a := &stream{ + parent: nil, + weight: 16, + } + streams[1] = a + b := &stream{ + parent: a, + weight: 16, + } + streams[2] = b + adjustStreamPriority(streams, 1, PriorityParam{ + Weight: 20, + StreamDep: 2, + }) + if a.parent != b { + t.Errorf("Expected A's parent to be B") + } + if a.weight != 20 { + t.Errorf("Expected A's weight to be 20; got %d", a.weight) + } + if b.parent != nil { + t.Errorf("Expected B to have no parent") + } + if b.weight != 16 { + t.Errorf("Expected B's weight to be 16; got %d", b.weight) + } +} + +func TestPriorityExclusiveZero(t *testing.T) { + // A B and C are all children of the 0 stream. 
+ // Exclusive reprioritization to any of the streams + // should bring the rest of the streams under the + // reprioritized stream + streams := make(map[uint32]*stream) + a := &stream{ + parent: nil, + weight: 16, + } + streams[1] = a + b := &stream{ + parent: nil, + weight: 16, + } + streams[2] = b + c := &stream{ + parent: nil, + weight: 16, + } + streams[3] = c + adjustStreamPriority(streams, 3, PriorityParam{ + Weight: 20, + StreamDep: 0, + Exclusive: true, + }) + if a.parent != c { + t.Errorf("Expected A's parent to be C") + } + if a.weight != 16 { + t.Errorf("Expected A's weight to be 16; got %d", a.weight) + } + if b.parent != c { + t.Errorf("Expected B's parent to be C") + } + if b.weight != 16 { + t.Errorf("Expected B's weight to be 16; got %d", b.weight) + } + if c.parent != nil { + t.Errorf("Expected C to have no parent") + } + if c.weight != 20 { + t.Errorf("Expected C's weight to be 20; got %d", b.weight) + } +} + +func TestPriorityOwnParent(t *testing.T) { + streams := make(map[uint32]*stream) + a := &stream{ + parent: nil, + weight: 16, + } + streams[1] = a + b := &stream{ + parent: a, + weight: 16, + } + streams[2] = b + adjustStreamPriority(streams, 1, PriorityParam{ + Weight: 20, + StreamDep: 1, + }) + if a.parent != nil { + t.Errorf("Expected A's parent to be nil") + } + if a.weight != 20 { + t.Errorf("Expected A's weight to be 20; got %d", a.weight) + } + if b.parent != a { + t.Errorf("Expected B's parent to be A") + } + if b.weight != 16 { + t.Errorf("Expected B's weight to be 16; got %d", b.weight) + } + +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 5fb92cde4b8..f368738f7c7 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -6,8 +6,8 @@ // instead, and make sure that on close we close all open // streams. then remove doneServing? -// TODO: finish GOAWAY support. Consider each incoming frame type and -// whether it should be ignored during a shutdown race. +// TODO: re-audit GOAWAY support. Consider each incoming frame type and +// whether it should be ignored during graceful shutdown. // TODO: disconnect idle clients. GFE seems to do 4 minutes. make // configurable? or maximum number of idle clients and remove the @@ -46,7 +46,11 @@ import ( "log" "net" "net/http" + "net/textproto" "net/url" + "os" + "reflect" + "runtime" "strconv" "strings" "sync" @@ -190,28 +194,80 @@ func ConfigureServer(s *http.Server, conf *Server) error { if testHookOnConn != nil { testHookOnConn() } - conf.handleConn(hs, c, h) + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) } s.TLSNextProto[NextProtoTLS] = protoHandler s.TLSNextProto["h2-14"] = protoHandler // temporary; see above. return nil } -func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. 
+ Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + sc := &serverConn{ - srv: srv, - hs: hs, + srv: s, + hs: opts.baseConfig(), conn: c, + baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), bw: newBufferedWriter(c), - handler: h, + handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), wantWriteFrameCh: make(chan frameWriteMsg, 8), wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), - advMaxStreams: srv.maxConcurrentStreams(), + advMaxStreams: s.maxConcurrentStreams(), writeSched: writeScheduler{ maxFrameSize: initialMaxFrameSize, }, @@ -220,17 +276,18 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { serveG: newGoroutineLock(), pushEnabled: true, } + sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, sc.onNewHeaderField) - sc.hpackDecoder.SetMaxStringLength(sc.maxHeaderStringLen()) fr := NewFramer(sc.bw, c) - fr.SetMaxReadFrameSize(srv.maxReadFrameSize()) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr - if tc, ok := c.(*tls.Conn); ok { + if tc, ok := c.(connectionStater); ok { sc.tlsState = new(tls.ConnectionState) *sc.tlsState = tc.ConnectionState() // 9.2 Use of TLS Features @@ -260,7 +317,7 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { // So for now, do nothing here again. } - if !srv.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." @@ -282,32 +339,8 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { sc.serve() } -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. 
-func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} - func (sc *serverConn) rejectConn(err ErrCode, debug string) { - sc.vlogf("REJECTING conn: %v, %s", err, debug) + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) // ignoring errors. hanging up anyway. sc.framer.WriteGoAway(0, err, []byte(debug)) sc.bw.Flush() @@ -321,8 +354,8 @@ type serverConn struct { conn net.Conn bw *bufferedWriter // writing to conn handler http.Handler + baseCtx contextContext framer *Framer - hpackDecoder *hpack.Decoder doneServing chan struct{} // closed when serverConn.serve ends readFrameCh chan readFrameResult // written by serverConn.readFrames wantWriteFrameCh chan frameWriteMsg // from handlers -> serve @@ -349,7 +382,6 @@ type serverConn struct { headerTableSize uint32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - req requestParam // non-zero while reading request headers writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh needsFrameFlush bool // last frame write wasn't a flush writeSched writeScheduler @@ -358,22 +390,13 @@ type serverConn struct { goAwayCode ErrCode shutdownTimerCh <-chan time.Time // nil until used shutdownTimer *time.Timer // nil until used + freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer hpackEncoder *hpack.Encoder } -func (sc *serverConn) maxHeaderStringLen() int { - v := sc.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) - } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 -} - func (sc *serverConn) maxHeaderListSize() uint32 { n := sc.hs.MaxHeaderBytes if n <= 0 { @@ -386,21 +409,6 @@ func (sc *serverConn) maxHeaderListSize() uint32 { return uint32(n + typicalHeaders*perFieldOverhead) } -// requestParam is the state of the next request, initialized over -// potentially several frames HEADERS + zero or more CONTINUATION -// frames. -type requestParam struct { - // stream is non-nil if we're reading (HEADER or CONTINUATION) - // frames for a request (but not DATA). - stream *stream - header http.Header - method, path string - scheme, authority string - sawRegularHeader bool // saw a non-pseudo header already - invalidHeader bool // an invalid header was seen - headerListSize int64 // actually uint32, but easier math this way -} - // stream represents a stream. This is the minimal metadata needed by // the serve goroutine. Most of the actual stream state is owned by // the http.Handler's goroutine in the responseWriter. Because the @@ -410,20 +418,30 @@ type requestParam struct { // responseWriter's state field. 
type stream struct { // immutable: - id uint32 - body *pipe // non-nil if expecting DATA frames - cw closeWaiter // closed wait stream transitions to closed state + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx contextContext + cancelCtx func() // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - weight uint8 - state streamState - sentReset bool // only true once detached from streams map - gotReset bool // only true once detacted from streams map + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + sentReset bool // only true once detached from streams map + gotReset bool // only true once detacted from streams map + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + reqBuf []byte + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer } func (sc *serverConn) Framer() *Framer { return sc.framer } @@ -474,12 +492,55 @@ func (sc *serverConn) logf(format string, args ...interface{}) { } } +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. + str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { if err == nil { return } - str := err.Error() - if err == io.EOF || strings.Contains(str, "use of closed network connection") { + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { // Boring, expected errors. sc.vlogf(format, args...) 
} else { @@ -487,55 +548,6 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { } } -func (sc *serverConn) onNewHeaderField(f hpack.HeaderField) { - sc.serveG.check() - sc.vlogf("got header field %+v", f) - switch { - case !validHeader(f.Name): - sc.req.invalidHeader = true - case strings.HasPrefix(f.Name, ":"): - if sc.req.sawRegularHeader { - sc.logf("pseudo-header after regular header") - sc.req.invalidHeader = true - return - } - var dst *string - switch f.Name { - case ":method": - dst = &sc.req.method - case ":path": - dst = &sc.req.path - case ":scheme": - dst = &sc.req.scheme - case ":authority": - dst = &sc.req.authority - default: - // 8.1.2.1 Pseudo-Header Fields - // "Endpoints MUST treat a request or response - // that contains undefined or invalid - // pseudo-header fields as malformed (Section - // 8.1.2.6)." - sc.logf("invalid pseudo-header %q", f.Name) - sc.req.invalidHeader = true - return - } - if *dst != "" { - sc.logf("duplicate pseudo-header %q sent", f.Name) - sc.req.invalidHeader = true - return - } - *dst = f.Value - default: - sc.req.sawRegularHeader = true - sc.req.header.Add(sc.canonicalHeader(f.Name), f.Value) - const headerFieldOverhead = 32 // per spec - sc.req.headerListSize += int64(len(f.Name)) + int64(len(f.Value)) + headerFieldOverhead - if sc.req.headerListSize > int64(sc.maxHeaderListSize()) { - sc.hpackDecoder.SetEmitEnabled(false) - } - } -} - func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() cv, ok := commonCanonHeader[v] @@ -570,10 +582,11 @@ type readFrameResult struct { // It's run on its own goroutine. func (sc *serverConn) readFrames() { gate := make(gate) + gateDone := gate.Done for { f, err := sc.framer.ReadFrame() select { - case sc.readFrameCh <- readFrameResult{f, err, gate.Done}: + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: case <-sc.doneServing: return } @@ -582,6 +595,9 @@ func (sc *serverConn) readFrames() { case <-sc.doneServing: return } + if terminalReadFrameError(err) { + return + } } } @@ -615,6 +631,7 @@ func (sc *serverConn) stopShutdownTimer() { } func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. 
if testHookOnPanicMu != nil { testHookOnPanicMu.Lock() defer testHookOnPanicMu.Unlock() @@ -636,7 +653,9 @@ func (sc *serverConn) serve() { defer sc.stopShutdownTimer() defer close(sc.doneServing) // unblocks handlers trying to send - sc.vlogf("HTTP/2 connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } sc.writeFrame(frameWriteMsg{ write: writeSettings{ @@ -653,7 +672,7 @@ func (sc *serverConn) serve() { sc.unackedSettings++ if err := sc.readPreface(); err != nil { - sc.condlogf(err, "error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) return } // Now that we've got the preface, get us out of the @@ -719,7 +738,9 @@ func (sc *serverConn) readPreface() error { return errors.New("timeout waiting for client preface") case err := <-errc: if err == nil { - sc.vlogf("client %v said hello", sc.conn.RemoteAddr()) + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } } return err } @@ -729,26 +750,28 @@ var errChanPool = sync.Pool{ New: func() interface{} { return make(chan error, 1) }, } -// writeDataFromHandler writes the data described in req to stream.id. -// -// The flow control currently happens in the Handler where it waits -// for 1 or more bytes to be available to then write here. So at this -// point we know that we have flow control. But this might have to -// change when priority is implemented, so the serve goroutine knows -// the total amount of bytes waiting to be sent and can can have more -// scheduling decisions available. -func (sc *serverConn) writeDataFromHandler(stream *stream, writeData *writeData) error { +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(frameWriteMsg{ - write: writeData, + write: writeArg, stream: stream, done: ch, }) if err != nil { return err } + var frameWriteDone bool // the frame write is done (successfully or not) select { case err = <-ch: + frameWriteDone = true case <-sc.doneServing: return errClientDisconnected case <-stream.cw: @@ -761,11 +784,15 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, writeData *writeData) // close. select { case err = <-ch: + frameWriteDone = true default: return errStreamClosed } } errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } return err } @@ -798,7 +825,23 @@ func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error { // If you're not on the serve goroutine, use writeFrameFromHandler instead. func (sc *serverConn) writeFrame(wm frameWriteMsg) { sc.serveG.check() - sc.writeSched.add(wm) + + var ignoreWrite bool + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. 
+ switch wm.write.(type) { + case *writeResHeaders: + wm.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wm.stream.wroteHeaders { + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.add(wm) + } sc.scheduleFrameWrite() } @@ -831,6 +874,11 @@ func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { go sc.writeFrameAsync(wm) } +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// the main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + // wroteFrame is called on the serve goroutine with the result of // whatever happened on writeFrameAsync. func (sc *serverConn) wroteFrame(res frameWriteResult) { @@ -845,6 +893,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { closeStream := endsStream(wm.write) + if _, ok := wm.write.(handlerPanicRST); ok { + sc.closeStream(st, errHandlerPanicked) + } + // Reply (if requested) to the blocked ServeHTTP goroutine. if ch := wm.done; ch != nil { select { @@ -957,18 +1009,6 @@ func (sc *serverConn) resetStream(se StreamError) { } } -// curHeaderStreamID returns the stream ID of the header block we're -// currently in the middle of reading. If this returns non-zero, the -// next frame must be a CONTINUATION with this stream id. -func (sc *serverConn) curHeaderStreamID() uint32 { - sc.serveG.check() - st := sc.req.stream - if st == nil { - return 0 - } - return st.id -} - // processFrameFromReader processes the serve loop's read from readFrameCh from the // frame-reading goroutine. // processFrameFromReader returns whether the connection should be kept open. @@ -980,7 +1020,7 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFrameSize) return true // goAway will close the loop } - clientGone := err == io.EOF || strings.Contains(err.Error(), "use of closed network connection") + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) if clientGone { // TODO: could we also get into this state if // the peer does a half close @@ -994,7 +1034,9 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { } } else { f := res.f - sc.vlogf("got %v: %#v", f.Header(), f) + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } err = sc.processFrame(f) if err == nil { return true @@ -1009,14 +1051,14 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: - sc.logf("%v: %v", sc.conn.RemoteAddr(), ev) + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown default: if res.err != nil { - sc.logf("disconnecting; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) } else { - sc.logf("disconnection due to other error: %v", err) + sc.logf("http2: server closing client connection: %v", err) } return false } @@ -1033,21 +1075,11 @@ func (sc *serverConn) processFrame(f Frame) error { sc.sawFirstSettings = true } - if s := sc.curHeaderStreamID(); s != 0 { - if cf, ok := f.(*ContinuationFrame); !ok { - return ConnectionError(ErrCodeProtocol) - } else if cf.Header().StreamID != s { - return ConnectionError(ErrCodeProtocol) - } - } - switch f := 
f.(type) { case *SettingsFrame: return sc.processSettings(f) - case *HeadersFrame: + case *MetaHeadersFrame: return sc.processHeaders(f) - case *ContinuationFrame: - return sc.processContinuation(f) case *WindowUpdateFrame: return sc.processWindowUpdate(f) case *PingFrame: @@ -1063,14 +1095,14 @@ func (sc *serverConn) processFrame(f Frame) error { // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. return ConnectionError(ErrCodeProtocol) default: - sc.vlogf("Ignoring frame: %v", f.Header()) + sc.vlogf("http2: server ignoring frame: %v", f.Header()) return nil } } func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() - if f.Flags.Has(FlagSettingsAck) { + if f.IsAck() { // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -1126,6 +1158,7 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { } if st != nil { st.gotReset = true + st.cancelCtx() sc.closeStream(st, StreamError{f.StreamID, f.ErrCode}) } return nil @@ -1147,6 +1180,18 @@ func (sc *serverConn) closeStream(st *stream, err error) { } st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.forgetStream(st.id) + if st.reqBuf != nil { + // Stash this request body buffer (64k) away for reuse + // by a future POST/PUT/etc. + // + // TODO(bradfitz): share on the server? sync.Pool? + // Server requires locks and might hurt contention. + // sync.Pool might work, or might be worse, depending + // on goroutine CPU migrations. (get and put on + // separate CPUs). Maybe a mix of strategies. But + // this is an easy win for now. + sc.freeRequestBodyBuf = st.reqBuf + } } func (sc *serverConn) processSettings(f *SettingsFrame) error { @@ -1174,7 +1219,9 @@ func (sc *serverConn) processSetting(s Setting) error { if err := s.Valid(); err != nil { return err } - sc.vlogf("processing setting %v", s) + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } switch s.ID { case SettingHeaderTableSize: sc.headerTableSize = s.Val @@ -1193,6 +1240,9 @@ func (sc *serverConn) processSetting(s Setting) error { // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } } return nil } @@ -1232,7 +1282,7 @@ func (sc *serverConn) processData(f *DataFrame) error { // with a stream error (Section 5.4.2) of type STREAM_CLOSED." id := f.Header().StreamID st, ok := sc.streams[id] - if !ok || st.state != stateOpen { + if !ok || st.state != stateOpen || st.gotTrailerHeader { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that // the http.Handler returned, so it's done reading & @@ -1266,18 +1316,39 @@ func (sc *serverConn) processData(f *DataFrame) error { st.bodyBytes += int64(len(data)) } if f.StreamEnded() { - if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { - st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", - st.declBodyBytes, st.bodyBytes)) - } else { - st.body.CloseWithError(io.EOF) - } - st.state = stateHalfClosedRemote + st.endStream() } return nil } -func (sc *serverConn) processHeaders(f *HeadersFrame) error { +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). 
+func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() id := f.Header().StreamID if sc.inGoAway { @@ -1285,22 +1356,39 @@ func (sc *serverConn) processHeaders(f *HeadersFrame) error { return nil } // http://http2.github.io/http2-spec/#rfc.section.5.1.1 - if id%2 != 1 || id <= sc.maxStreamID || sc.req.stream != nil { - // Streams initiated by a client MUST use odd-numbered - // stream identifiers. [...] The identifier of a newly - // established stream MUST be numerically greater than all - // streams that the initiating endpoint has opened or - // reserved. [...] An endpoint that receives an unexpected - // stream identifier MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { return ConnectionError(ErrCodeProtocol) } - if id > sc.maxStreamID { - sc.maxStreamID = id + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + st := sc.streams[f.Header().StreamID] + if st != nil { + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. 
+ if id <= sc.maxStreamID { + return ConnectionError(ErrCodeProtocol) } - st := &stream{ - id: id, - state: stateOpen, + sc.maxStreamID = id + + ctx, cancelCtx := contextWithCancel(sc.baseCtx) + st = &stream{ + sc: sc, + id: id, + state: stateOpen, + ctx: ctx, + cancelCtx: cancelCtx, } if f.StreamEnded() { st.state = stateHalfClosedRemote @@ -1320,35 +1408,6 @@ func (sc *serverConn) processHeaders(f *HeadersFrame) error { if sc.curOpenStreams == 1 { sc.setConnState(http.StateActive) } - sc.req = requestParam{ - stream: st, - header: make(http.Header), - } - sc.hpackDecoder.SetEmitEnabled(true) - return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (sc *serverConn) processContinuation(f *ContinuationFrame) error { - sc.serveG.check() - st := sc.streams[f.Header().StreamID] - if st == nil || sc.curHeaderStreamID() != st.id { - return ConnectionError(ErrCodeProtocol) - } - return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error { - sc.serveG.check() - if _, err := sc.hpackDecoder.Write(frag); err != nil { - return ConnectionError(ErrCodeCompression) - } - if !end { - return nil - } - if err := sc.hpackDecoder.Close(); err != nil { - return ConnectionError(ErrCodeCompression) - } - defer sc.resetPendingRequest() if sc.curOpenStreams > sc.advMaxStreams { // "Endpoints MUST NOT exceed the limit set by their // peer. An endpoint that receives a HEADERS frame @@ -1368,23 +1427,59 @@ func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bo return StreamError{st.id, ErrCodeRefusedStream} } - rw, req, err := sc.newWriterAndRequest() + rw, req, err := sc.newWriterAndRequest(st, f) if err != nil { return err } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } st.body = req.Body.(*requestBody).pipe // may be nil st.declBodyBytes = req.ContentLength handler := sc.handler.ServeHTTP - if !sc.hpackDecoder.EmitEnabled() { + if f.Truncated { // Their header list was too long. Send a 431 error. handler = handleHeaderListTooLong + } else if err := checkValidHTTP2Request(req); err != nil { + handler = new400Handler(err) } go sc.runHandler(rw, req, handler) return nil } +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return StreamError{st.id, ErrCodeProtocol} + } + + if len(f.PseudoFields()) > 0 { + return StreamError{st.id, ErrCodeProtocol} + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return StreamError{st.id, ErrCodeProtocol} + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + func (sc *serverConn) processPriority(f *PriorityFrame) error { adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam) return nil @@ -1426,19 +1521,21 @@ func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority } } -// resetPendingRequest zeros out all state related to a HEADERS frame -// and its zero or more CONTINUATION frames sent to start a new -// request. 
-func (sc *serverConn) resetPendingRequest() { +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - sc.req = requestParam{} -} -func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) { - sc.serveG.check() - rp := &sc.req - if rp.invalidHeader || rp.method == "" || rp.path == "" || - (rp.scheme != "https" && rp.scheme != "http") { + method := f.PseudoValue("method") + path := f.PseudoValue("path") + scheme := f.PseudoValue("scheme") + authority := f.PseudoValue("authority") + + isConnect := method == "CONNECT" + if isConnect { + if path != "" || scheme != "" || authority == "" { + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + } else if method == "" || path == "" || + (scheme != "https" && scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -1449,55 +1546,100 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err // "All HTTP/2 requests MUST include exactly one valid // value for the :method, :scheme, and :path // pseudo-header fields" - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + + bodyOpen := !f.StreamEnded() + if method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} } var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + + if scheme == "https" { tlsState = sc.tlsState } - authority := rp.authority + + header := make(http.Header) + for _, hf := range f.RegularFields() { + header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if authority == "" { - authority = rp.header.Get("Host") + authority = header.Get("Host") } - needsContinue := rp.header.Get("Expect") == "100-continue" + needsContinue := header.Get("Expect") == "100-continue" if needsContinue { - rp.header.Del("Expect") + header.Del("Expect") } // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) + if cookies := header["Cookie"]; len(cookies) > 1 { + header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } } - bodyOpen := rp.stream.state == stateOpen + delete(header, "Trailer") + body := &requestBody{ conn: sc, - stream: rp.stream, + stream: st, needsContinue: needsContinue, } - // TODO: handle asterisk '*' requests + test - url, err := url.ParseRequestURI(rp.path) - if err != nil { - // TODO: find the right error code? 
- return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + var url_ *url.URL + var requestURI string + if isConnect { + url_ = &url.URL{Host: authority} + requestURI = authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(path) + if err != nil { + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + requestURI = path } req := &http.Request{ - Method: rp.method, - URL: url, + Method: method, + URL: url_, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: rp.path, + Header: header, + RequestURI: requestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, Host: authority, Body: body, + Trailer: trailer, } + req = requestWithContext(req, st.ctx) if bodyOpen { + // Disabled, per golang.org/issue/14960: + // st.reqBuf = sc.getRequestBodyBuf() + // TODO: remove this 64k of garbage per request (again, but without a data race): + buf := make([]byte, initialWindowSize) + body.pipe = &pipe{ - b: &fixedBuffer{buf: make([]byte, initialWindowSize)}, // TODO: share/remove XXX + b: &fixedBuffer{buf: buf}, } - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := header["Content-Length"]; ok { req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) } else { req.ContentLength = -1 @@ -1510,7 +1652,7 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err rws.conn = sc rws.bw = bwSave rws.bw.Reset(chunkWriter{rws}) - rws.stream = rp.stream + rws.stream = st rws.req = req rws.body = body @@ -1518,11 +1660,37 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err return rw, req, nil } +func (sc *serverConn) getRequestBodyBuf() []byte { + sc.serveG.check() + if buf := sc.freeRequestBodyBuf; buf != nil { + sc.freeRequestBodyBuf = nil + return buf + } + return make([]byte, initialWindowSize) +} + // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - defer rw.handlerDone() - // TODO: catch panics like net/http.Server + didPanic := true + defer func() { + rw.rws.stream.cancelCtx() + if didPanic { + e := recover() + // Same as net/http: + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.writeFrameFromHandler(frameWriteMsg{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + return + } + rw.handlerDone() + }() handler(rw, req) + didPanic = false } func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { @@ -1588,7 +1756,10 @@ type bodyReadMsg struct { // and schedules flow control tokens to be sent. 
func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) { sc.serveG.checkNotOn() // NOT on - sc.bodyReadCh <- bodyReadMsg{st, n} + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } } func (sc *serverConn) noteBodyRead(st *stream, n int) { @@ -1655,7 +1826,7 @@ type requestBody struct { func (b *requestBody) Close() error { if b.pipe != nil { - b.pipe.CloseWithError(errClosedBody) + b.pipe.BreakWithError(errClosedBody) } b.closed = true return nil @@ -1706,11 +1877,14 @@ type responseWriterState struct { // mutated by http.Handler goroutine: handlerHeader http.Header // nil until called snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk status int // status code passed to WriteHeader wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished - curWrite writeData + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 closeNotifierMu sync.Mutex // guards closeNotifierCh closeNotifierCh chan bool // nil until first used @@ -1720,6 +1894,23 @@ type chunkWriter struct{ rws *responseWriterState } func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !ValidTrailerHeader(k) { + // Forbidden by RFC 2616 14.40. + rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + // writeChunk writes chunks from the bufio.Writer. But because // bufio.Writer may bypass its chunking, sometimes p may be // arbitrarily large. @@ -1730,16 +1921,38 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if !rws.wroteHeader { rws.writeHeader(200) } + + isHeadResp := rws.req.Method == "HEAD" if !rws.sentHeader { rws.sentHeader = true - var ctype, clen string // implicit ones, if we can calculate it - if rws.handlerDone && rws.snapHeader.Get("Content-Length") == "" { + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } - if rws.snapHeader.Get("Content-Type") == "" { + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) { ctype = http.DetectContentType(p) } - endStream := rws.handlerDone && len(p) == 0 + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. 
+ date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: rws.status, @@ -1747,6 +1960,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { endStream: endStream, contentType: ctype, contentLength: clen, + date: date, }) if err != nil { return 0, err @@ -1755,23 +1969,89 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { return 0, nil } } + if isHeadResp { + return len(p), nil + } if len(p) == 0 && !rws.handlerDone { return 0, nil } - // Reuse curWrite (which as a pointer fits into the - // 'writeFramer' interface value) for each write to avoid an - // allocation per write. - curWrite := &rws.curWrite - curWrite.streamID = rws.stream.id - curWrite.p = p - curWrite.endStream = rws.handlerDone - if err := rws.conn.writeDataFromHandler(rws.stream, curWrite); err != nil { - return 0, err + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + return len(p), err } return len(p), nil } +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 2616 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclarnig them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. 
(And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. +func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + func (w *responseWriter) Flush() { rws := w.rws if rws == nil { @@ -1874,6 +2154,15 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, if !rws.wroteHeader { w.WriteHeader(200) } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + if dataB != nil { return rws.bw.Write(dataB) } else { @@ -1883,11 +2172,92 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws - if rws == nil { - panic("handlerDone called twice") - } rws.handlerDone = true w.Flush() w.rws = nil responseWriterStatePool.Put(rws) } + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 2616 section 2.1 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2Request checks whether req is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2Request(req *http.Request) error { + for _, h := range connHeaders { + if _, ok := req.Header[h]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", h) + } + } + te := req.Header["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. 
+// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 +func ValidTrailerHeader(name string) bool { + name = http.CanonicalHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} diff --git a/vendor/golang.org/x/net/http2/server_test.go b/vendor/golang.org/x/net/http2/server_test.go new file mode 100644 index 00000000000..a45905f3473 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server_test.go @@ -0,0 +1,3303 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2/hpack" +) + +var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered") + +func stderrv() io.Writer { + if *stderrVerbose { + return os.Stderr + } + + return ioutil.Discard +} + +type serverTester struct { + cc net.Conn // client conn + t testing.TB + ts *httptest.Server + fr *Framer + logBuf *bytes.Buffer + logFilter []string // substrings to filter out + scMu sync.Mutex // guards sc + sc *serverConn + hpackDec *hpack.Decoder + decodedHeaders [][2]string + + // writing headers: + headerBuf bytes.Buffer + hpackEnc *hpack.Encoder + + // reading frames: + frc chan Frame + frErrc chan error + readTimer *time.Timer +} + +func init() { + testHookOnPanicMu = new(sync.Mutex) +} + +func resetHooks() { + testHookOnPanicMu.Lock() + testHookOnPanic = nil + testHookOnPanicMu.Unlock() +} + +type serverTesterOpt string + +var optOnlyServer = serverTesterOpt("only_server") +var optQuiet = serverTesterOpt("quiet_logging") + +func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { + resetHooks() + + logBuf := new(bytes.Buffer) + ts := httptest.NewUnstartedServer(handler) + + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, + // The h2-14 is temporary, until curl is updated. 
(as used by unit tests + // in Docker) + NextProtos: []string{NextProtoTLS, "h2-14"}, + } + + var onlyServer, quiet bool + for _, opt := range opts { + switch v := opt.(type) { + case func(*tls.Config): + v(tlsConfig) + case func(*httptest.Server): + v(ts) + case serverTesterOpt: + switch v { + case optOnlyServer: + onlyServer = true + case optQuiet: + quiet = true + } + case func(net.Conn, http.ConnState): + ts.Config.ConnState = v + default: + t.Fatalf("unknown newServerTester option type %T", v) + } + } + + ConfigureServer(ts.Config, &Server{}) + + st := &serverTester{ + t: t, + ts: ts, + logBuf: logBuf, + frc: make(chan Frame, 1), + frErrc: make(chan error, 1), + } + st.hpackEnc = hpack.NewEncoder(&st.headerBuf) + st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField) + + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + if quiet { + ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + } else { + ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, logBuf), "", log.LstdFlags) + } + ts.StartTLS() + + if VerboseLogs { + t.Logf("Running test server at: %s", ts.URL) + } + testHookGetServerConn = func(v *serverConn) { + st.scMu.Lock() + defer st.scMu.Unlock() + st.sc = v + st.sc.testHookCh = make(chan func(int)) + } + log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st})) + if !onlyServer { + cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) + if err != nil { + t.Fatal(err) + } + st.cc = cc + st.fr = NewFramer(cc, cc) + } + return st +} + +func (st *serverTester) closeConn() { + st.scMu.Lock() + defer st.scMu.Unlock() + st.sc.conn.Close() +} + +func (st *serverTester) addLogFilter(phrase string) { + st.logFilter = append(st.logFilter, phrase) +} + +func (st *serverTester) stream(id uint32) *stream { + ch := make(chan *stream, 1) + st.sc.testHookCh <- func(int) { + ch <- st.sc.streams[id] + } + return <-ch +} + +func (st *serverTester) streamState(id uint32) streamState { + ch := make(chan streamState, 1) + st.sc.testHookCh <- func(int) { + state, _ := st.sc.state(id) + ch <- state + } + return <-ch +} + +// loopNum reports how many times this conn's select loop has gone around. +func (st *serverTester) loopNum() int { + lastc := make(chan int, 1) + st.sc.testHookCh <- func(loopNum int) { + lastc <- loopNum + } + return <-lastc +} + +// awaitIdle heuristically awaits for the server conn's select loop to be idle. +// The heuristic is that the server connection's serve loop must schedule +// 50 times in a row without any channel sends or receives occurring. +func (st *serverTester) awaitIdle() { + remain := 50 + last := st.loopNum() + for remain > 0 { + n := st.loopNum() + if n == last+1 { + remain-- + } else { + remain = 50 + } + last = n + } +} + +func (st *serverTester) Close() { + if st.t.Failed() { + // If we failed already (and are likely in a Fatal, + // unwindowing), force close the connection, so the + // httptest.Server doesn't wait forever for the conn + // to close. + if st.cc != nil { + st.cc.Close() + } + } + st.ts.Close() + if st.cc != nil { + st.cc.Close() + } + log.SetOutput(os.Stderr) +} + +// greet initiates the client's HTTP/2 connection into a state where +// frames may be sent. 
+func (st *serverTester) greet() { + st.writePreface() + st.writeInitialSettings() + st.wantSettings() + st.writeSettingsAck() + st.wantSettingsAck() +} + +func (st *serverTester) writePreface() { + n, err := st.cc.Write(clientPreface) + if err != nil { + st.t.Fatalf("Error writing client preface: %v", err) + } + if n != len(clientPreface) { + st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface)) + } +} + +func (st *serverTester) writeInitialSettings() { + if err := st.fr.WriteSettings(); err != nil { + st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) + } +} + +func (st *serverTester) writeSettingsAck() { + if err := st.fr.WriteSettingsAck(); err != nil { + st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) + } +} + +func (st *serverTester) writeHeaders(p HeadersFrameParam) { + if err := st.fr.WriteHeaders(p); err != nil { + st.t.Fatalf("Error writing HEADERS: %v", err) + } +} + +func (st *serverTester) encodeHeaderField(k, v string) { + err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } +} + +// encodeHeaderRaw is the magic-free version of encodeHeader. +// It takes 0 or more (k, v) pairs and encodes them. +func (st *serverTester) encodeHeaderRaw(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + st.headerBuf.Reset() + for len(headers) > 0 { + k, v := headers[0], headers[1] + st.encodeHeaderField(k, v) + headers = headers[2:] + } + return st.headerBuf.Bytes() +} + +// encodeHeader encodes headers and returns their HPACK bytes. headers +// must contain an even number of key/value pairs. There may be +// multiple pairs for keys (e.g. "cookie"). The :method, :path, and +// :scheme headers default to GET, / and https. +func (st *serverTester) encodeHeader(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + + st.headerBuf.Reset() + + if len(headers) == 0 { + // Fast path, mostly for benchmarks, so test code doesn't pollute + // profiles when we're looking to improve server allocations. + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + return st.headerBuf.Bytes() + } + + if len(headers) == 2 && headers[0] == ":method" { + // Another fast path for benchmarks. + st.encodeHeaderField(":method", headers[1]) + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + return st.headerBuf.Bytes() + } + + pseudoCount := map[string]int{} + keys := []string{":method", ":path", ":scheme"} + vals := map[string][]string{ + ":method": {"GET"}, + ":path": {"/"}, + ":scheme": {"https"}, + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if _, ok := vals[k]; !ok { + keys = append(keys, k) + } + if strings.HasPrefix(k, ":") { + pseudoCount[k]++ + if pseudoCount[k] == 1 { + vals[k] = []string{v} + } else { + // Allows testing of invalid headers w/ dup pseudo fields. + vals[k] = append(vals[k], v) + } + } else { + vals[k] = append(vals[k], v) + } + } + for _, k := range keys { + for _, v := range vals[k] { + st.encodeHeaderField(k, v) + } + } + return st.headerBuf.Bytes() +} + +// bodylessReq1 writes a HEADERS frames with StreamID 1 and EndStream and EndHeaders set. 
+func (st *serverTester) bodylessReq1(headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) +} + +func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { + if err := st.fr.WriteData(streamID, endStream, data); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func (st *serverTester) readFrame() (Frame, error) { + go func() { + fr, err := st.fr.ReadFrame() + if err != nil { + st.frErrc <- err + } else { + st.frc <- fr + } + }() + t := st.readTimer + if t == nil { + t = time.NewTimer(2 * time.Second) + st.readTimer = t + } + t.Reset(2 * time.Second) + defer t.Stop() + select { + case f := <-st.frc: + return f, nil + case err := <-st.frErrc: + return nil, err + case <-t.C: + return nil, errors.New("timeout waiting for frame") + } +} + +func (st *serverTester) wantHeaders() *HeadersFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a HEADERS frame: %v", err) + } + hf, ok := f.(*HeadersFrame) + if !ok { + st.t.Fatalf("got a %T; want *HeadersFrame", f) + } + return hf +} + +func (st *serverTester) wantContinuation() *ContinuationFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err) + } + cf, ok := f.(*ContinuationFrame) + if !ok { + st.t.Fatalf("got a %T; want *ContinuationFrame", f) + } + return cf +} + +func (st *serverTester) wantData() *DataFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a DATA frame: %v", err) + } + df, ok := f.(*DataFrame) + if !ok { + st.t.Fatalf("got a %T; want *DataFrame", f) + } + return df +} + +func (st *serverTester) wantSettings() *SettingsFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("got a %T; want *SettingsFrame", f) + } + return sf +} + +func (st *serverTester) wantPing() *PingFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a PING frame: %v", err) + } + pf, ok := f.(*PingFrame) + if !ok { + st.t.Fatalf("got a %T; want *PingFrame", f) + } + return pf +} + +func (st *serverTester) wantGoAway() *GoAwayFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err) + } + gf, ok := f.(*GoAwayFrame) + if !ok { + st.t.Fatalf("got a %T; want *GoAwayFrame", f) + } + return gf +} + +func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting an RSTStream frame: %v", err) + } + rs, ok := f.(*RSTStreamFrame) + if !ok { + st.t.Fatalf("got a %T; want *RSTStreamFrame", f) + } + if rs.FrameHeader.StreamID != streamID { + st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID) + } + if rs.ErrCode != errCode { + st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode) + } +} + +func (st *serverTester) wantWindowUpdate(streamID, incr uint32) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err) + } + wu, ok := f.(*WindowUpdateFrame) + if !ok { + st.t.Fatalf("got a %T; want *WindowUpdateFrame", f) + } + if wu.FrameHeader.StreamID != streamID { + st.t.Fatalf("WindowUpdate StreamID = %d; want %d", 
wu.FrameHeader.StreamID, streamID) + } + if wu.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr) + } +} + +func (st *serverTester) wantSettingsAck() { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("Wanting a settings ACK, received a %T", f) + } + if !sf.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } + +} + +func TestServer(t *testing.T) { + gotReq := make(chan bool, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + gotReq <- true + }) + defer st.Close() + + covers("3.5", ` + The server connection preface consists of a potentially empty + SETTINGS frame ([SETTINGS]) that MUST be the first frame the + server sends in the HTTP/2 connection. + `) + + st.writePreface() + st.writeInitialSettings() + st.wantSettings() + st.writeSettingsAck() + st.wantSettingsAck() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + + select { + case <-gotReq: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for request") + } +} + +func TestServer_Request_Get(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("foo-bar", "some-value"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "GET" { + t.Errorf("Method = %q; want GET", r.Method) + } + if r.URL.Path != "/" { + t.Errorf("URL.Path = %q; want /", r.URL.Path) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if r.Close { + t.Error("Close = true; want false") + } + if !strings.Contains(r.RemoteAddr, ":") { + t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr) + } + if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 { + t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor) + } + wantHeader := http.Header{ + "Foo-Bar": []string{"some-value"}, + } + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Get_PathSlashes(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":path", "/%2f/"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.RequestURI != "/%2f/" { + t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI) + } + if r.URL.Path != "///" { + t.Errorf("URL.Path = %q; want ///", r.URL.Path) + } + }) +} + +// TODO: add a test with EndStream=true on the HEADERS but setting a +// Content-Length anyway. Should we just omit it and force it to +// zero? 
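For readers following the serverTester helpers above, here is a minimal standalone sketch (illustrative only, not part of the vendored package or of this change) of the same wire exchange that greet() and bodylessReq1() drive: client connection preface, SETTINGS exchange, then a bodyless GET on stream 1, built with the vendored Framer and hpack APIs. The address, TLS settings, and the example file itself are assumptions made for illustration.

// clientsketch.go - illustrative sketch of the frame exchange used by the tests above.
package main

import (
	"bytes"
	"crypto/tls"
	"log"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	// Assumption: an HTTP/2 ("h2") TLS server is listening on localhost:8443.
	cc, err := tls.Dial("tcp", "localhost:8443", &tls.Config{
		NextProtos:         []string{"h2"},
		InsecureSkipVerify: true, // test-only, as in newServerTester above
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	// RFC 7540 section 3.5: the client connection preface.
	if _, err := cc.Write([]byte("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n")); err != nil {
		log.Fatal(err)
	}

	fr := http2.NewFramer(cc, cc)
	if err := fr.WriteSettings(); err != nil { // empty client SETTINGS
		log.Fatal(err)
	}
	// The server's preface is its own SETTINGS frame; read it, then ack it,
	// mirroring greet() above (a sketch: the frame type is not checked here).
	if _, err := fr.ReadFrame(); err != nil {
		log.Fatal(err)
	}
	if err := fr.WriteSettingsAck(); err != nil {
		log.Fatal(err)
	}

	// HPACK-encode the pseudo-headers for "GET /", as encodeHeader does.
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	for _, f := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: ":scheme", Value: "https"},
		{Name: ":authority", Value: "localhost:8443"},
	} {
		if err := enc.WriteField(f); err != nil {
			log.Fatal(err)
		}
	}

	// Client-initiated stream IDs are odd; 1 is the first.
	if err := fr.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      1,
		BlockFragment: buf.Bytes(),
		EndStream:     true, // no request body
		EndHeaders:    true, // no CONTINUATION frames
	}); err != nil {
		log.Fatal(err)
	}

	// Read the response frames back (SETTINGS ack, HEADERS, DATA, ...).
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("got %T: %v", f, f.Header())
	}
}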
+ +func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) { + testBodyContents(t, -1, "", func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, nil) // just kidding. empty body. + }) +} + +func TestServer_Request_Post_Body_OneData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_TwoData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, false, []byte(content[:5])) + st.writeData(1, true, []byte(content[5:])) + }) +} + +func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) { + const content = "Some content" + testBodyContents(t, int64(len(content)), content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", strconv.Itoa(len(content)), + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) { + testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "3", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12")) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) { + testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "4", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12345")) + }) +} + +func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if 
r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(all) != wantBody { + t.Errorf("Read = %q; want %q", all, wantBody) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err == nil { + t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.", + wantReadError, all) + } + if !strings.Contains(err.Error(), wantReadError) { + t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +// Using a Host header, instead of :authority +func TestServer_Request_Get_Host(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("host", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +// Using an :authority pseudo-header, instead of Host +func TestServer_Request_Get_Authority(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":authority", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +func TestServer_Request_WithContinuation(t *testing.T) { + wantHeader := http.Header{ + "Foo-One": []string{"value-one"}, + "Foo-Two": []string{"value-two"}, + "Foo-Three": []string{"value-three"}, + } + testServerRequest(t, func(st *serverTester) { + fullHeaders := st.encodeHeader( + "foo-one", "value-one", + "foo-two", "value-two", + "foo-three", "value-three", + ) + remain := fullHeaders + chunks := 0 + for len(remain) > 0 { + const maxChunkSize = 5 + chunk := remain + if len(chunk) > maxChunkSize { + chunk = chunk[:maxChunkSize] + } + remain = remain[len(chunk):] + + if chunks == 0 { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: chunk, + EndStream: true, // no DATA frames + EndHeaders: false, // we'll have continuation frames + }) + } else { + err := st.fr.WriteContinuation(1, len(remain) == 0, chunk) + if err != nil { + t.Fatal(err) + } + } + chunks++ + } + if chunks < 2 { + t.Fatal("too few chunks") + } + }, func(r *http.Request) { + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + }) +} + +// Concatenated cookie headers. 
("8.1.2.5 Compressing the Cookie Header Field") +func TestServer_Request_CookieConcat(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.bodylessReq1( + ":authority", host, + "cookie", "a=b", + "cookie", "c=d", + "cookie", "e=f", + ) + }, func(r *http.Request) { + const want = "a=b; c=d; e=f" + if got := r.Header.Get("Cookie"); got != want { + t.Errorf("Cookie = %q; want %q", got, want) + } + }) +} + +func TestServer_Request_Reject_CapitalHeader(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameColon(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has:colon", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameNULL(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has\x00null", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameEmpty(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldValueNewline(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\nnewline") }) +} + +func TestServer_Request_Reject_HeaderFieldValueCR(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\rcarriage") }) +} + +func TestServer_Request_Reject_HeaderFieldValueDEL(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\x7fdel") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") }) +} + +func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid value" ... + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("duplicate pseudo-header") + st.bodylessReq1(":method", "GET", ":method", "POST") + }) +} + +func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All pseudo-header fields MUST appear in the header block + // before regular header fields. Any request or response that + // contains a pseudo-header field that appears in a header + // block after a regular header field MUST be treated as + // malformed (Section 8.1.2.6)." 
+ testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("pseudo-header after regular header") + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}) + enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"}) + enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"}) + enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"}) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: buf.Bytes(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") }) +} + +func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") }) +} + +func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter(`invalid pseudo-header ":unknown_thing"`) + st.bodylessReq1(":unknown_thing", "") + }) +} + +func testRejectRequest(t *testing.T, send func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Fatal("server request made it to handler; should've been rejected") + }) + defer st.Close() + + st.greet() + send(st) + st.wantRSTStream(1, ErrCodeProtocol) +} + +func TestServer_Request_Connect(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeaderRaw( + ":method", "CONNECT", + ":authority", "example.com:123", + ), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if g, w := r.Method, "CONNECT"; g != w { + t.Errorf("Method = %q; want %q", g, w) + } + if g, w := r.RequestURI, "example.com:123"; g != w { + t.Errorf("RequestURI = %q; want %q", g, w) + } + if g, w := r.URL.Host, "example.com:123"; g != w { + t.Errorf("URL.Host = %q; want %q", g, w) + } + }) +} + +func TestServer_Request_Connect_InvalidPath(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeaderRaw( + ":method", "CONNECT", + ":authority", "example.com:123", + ":path", "/bogus", + ), + EndStream: true, + EndHeaders: true, + }) + }) +} + +func TestServer_Request_Connect_InvalidScheme(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeaderRaw( + ":method", "CONNECT", + ":authority", "example.com:123", + ":scheme", "https", + ), + EndStream: true, + EndHeaders: true, + }) + }) +} + +func TestServer_Ping(t *testing.T) { + st := newServerTester(t, nil) + defer st.Close() + st.greet() + + // Server should ignore this one, since it has ACK set. + ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128} + if err := st.fr.WritePing(true, ackPingData); err != nil { + t.Fatal(err) + } + + // But the server should reply to this one, since ACK is false. 
+ pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8} + if err := st.fr.WritePing(false, pingData); err != nil { + t.Fatal(err) + } + + pf := st.wantPing() + if !pf.Flags.Has(FlagPingAck) { + t.Error("response ping doesn't have ACK set") + } + if pf.Data != pingData { + t.Errorf("response ping has data %q; want %q", pf.Data, pingData) + } +} + +func TestServer_RejectsLargeFrames(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("see golang.org/issue/13434") + } + + st := newServerTester(t, nil) + defer st.Close() + st.greet() + + // Write too large of a frame (too large by one byte) + // We ignore the return value because it's expected that the server + // will only read the first 9 bytes (the headre) and then disconnect. + st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1)) + + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeFrameSize { + t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize) + } + if st.logBuf.Len() != 0 { + // Previously we spun here for a bit until the GOAWAY disconnect + // timer fired, logging while we fired. + t.Errorf("unexpected server output: %.500s\n", st.logBuf.Bytes()) + } +} + +func TestServer_Handler_Sends_WindowUpdate(t *testing.T) { + puppet := newHandlerPuppet() + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + puppet.act(w, r) + }) + defer st.Close() + defer puppet.done() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // data coming + EndHeaders: true, + }) + st.writeData(1, false, []byte("abcdef")) + puppet.do(readBodyHandler(t, "abc")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + puppet.do(readBodyHandler(t, "def")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + st.writeData(1, true, []byte("ghijkl")) // END_STREAM here + puppet.do(readBodyHandler(t, "ghi")) + puppet.do(readBodyHandler(t, "jkl")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM +} + +func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) { + st := newServerTester(t, nil) + defer st.Close() + st.greet() + if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil { + t.Fatal(err) + } + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeFlowControl { + t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl) + } + if gf.LastStreamID != 0 { + t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0) + } +} + +func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) { + inHandler := make(chan bool) + blockHandler := make(chan bool) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + <-blockHandler + }) + defer st.Close() + defer close(blockHandler) + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + // Send a bogus window update: + if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil { + t.Fatal(err) + } + st.wantRSTStream(1, ErrCodeFlowControl) +} + +// testServerPostUnblock sends a hanging POST with unsent data to handler, +// then runs fn once in the handler, and verifies that the error returned from +// handler is acceptable. It fails if takes over 5 seconds for handler to exit. 
+func testServerPostUnblock(t *testing.T, + handler func(http.ResponseWriter, *http.Request) error, + fn func(*serverTester), + checkErr func(error), + otherHeaders ...string) { + inHandler := make(chan bool) + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + errc <- handler(w, r) + }) + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + fn(st) + select { + case err := <-errc: + if checkErr != nil { + checkErr(err) + } + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for Handler to return") + } + st.Close() +} + +func TestServer_RSTStream_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, + func(err error) { + want := StreamError{StreamID: 0x1, Code: 0x8} + if !reflect.DeepEqual(err, want) { + t.Errorf("Read error = %v; want %v", err, want) + } + }, + ) +} + +func TestServer_RSTStream_Unblocks_Header_Write(t *testing.T) { + // Run this test a bunch, because it doesn't always + // deadlock. But with a bunch, it did. + n := 50 + if testing.Short() { + n = 5 + } + for i := 0; i < n; i++ { + testServer_RSTStream_Unblocks_Header_Write(t) + } +} + +func testServer_RSTStream_Unblocks_Header_Write(t *testing.T) { + inHandler := make(chan bool, 1) + unblockHandler := make(chan bool, 1) + headerWritten := make(chan bool, 1) + wroteRST := make(chan bool, 1) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + <-wroteRST + w.Header().Set("foo", "bar") + w.WriteHeader(200) + w.(http.Flusher).Flush() + headerWritten <- true + <-unblockHandler + }) + defer st.Close() + + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + wroteRST <- true + st.awaitIdle() + select { + case <-headerWritten: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for header write") + } + unblockHandler <- true +} + +func TestServer_DeadConn_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { st.cc.Close() }, + func(err error) { + if err == nil { + t.Error("unexpected nil error from Request.Body.Read") + } + }, + ) +} + +var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error { + <-w.(http.CloseNotifier).CloseNotify() + return nil +} + +func TestServer_CloseNotify_After_RSTStream(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, nil) +} + +func TestServer_CloseNotify_After_ConnClose(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil) +} + +// that CloseNotify unblocks after a stream error due to the client's +// problem that's unrelated to them explicitly canceling it (which is +// 
TestServer_CloseNotify_After_RSTStream above) +func TestServer_CloseNotify_After_StreamError(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + // data longer than declared Content-Length => stream error + st.writeData(1, true, []byte("1234")) + }, nil, "content-length", "3") +} + +func TestServer_StateTransitions(t *testing.T) { + var st *serverTester + inHandler := make(chan bool) + writeData := make(chan bool) + leaveHandler := make(chan bool) + st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + if st.stream(1) == nil { + t.Errorf("nil stream 1 in handler") + } + if got, want := st.streamState(1), stateOpen; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + writeData <- true + if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF { + t.Errorf("body read = %d, %v; want 0, EOF", n, err) + } + if got, want := st.streamState(1), stateHalfClosedRemote; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + + <-leaveHandler + }) + st.greet() + if st.stream(1) != nil { + t.Fatal("stream 1 should be empty") + } + if got := st.streamState(1); got != stateIdle { + t.Fatalf("stream 1 should be idle; got %v", got) + } + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + <-writeData + st.writeData(1, true, nil) + + leaveHandler <- true + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("expected END_STREAM flag") + } + + if got, want := st.streamState(1), stateClosed; got != want { + t.Errorf("at end, state is %v; want %v", got, want) + } + if st.stream(1) != nil { + t.Fatal("at end, stream 1 should be gone") + } +} + +// test HEADERS w/o EndHeaders + another HEADERS (should get rejected) +func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + st.writeHeaders(HeadersFrameParam{ // Not a continuation. + StreamID: 3, // different stream. 
+ BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +// test HEADERS w/o EndHeaders + PING (should get rejected) +func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + if err := st.fr.WritePing(false, [8]byte{}); err != nil { + t.Fatal(err) + } + }) +} + +// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected) +func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + }) +} + +// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID +func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + }) +} + +// No HEADERS on stream 0. +func TestServer_Rejects_Headers0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writeHeaders(HeadersFrameParam{ + StreamID: 0, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +// No CONTINUATION on stream 0. +func TestServer_Rejects_Continuation0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil { + t.Fatal(err) + } + }) +} + +func TestServer_Rejects_PushPromise(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + pp := PushPromiseParam{ + StreamID: 1, + PromiseID: 3, + } + if err := st.fr.WritePushPromise(pp); err != nil { + t.Fatal(err) + } + }) +} + +// testServerRejectsConn tests that the server hangs up with a GOAWAY +// frame and a server close after the client does something +// deserving a CONNECTION_ERROR. +func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + st.addLogFilter("connection error: PROTOCOL_ERROR") + defer st.Close() + st.greet() + writeReq(st) + + st.wantGoAway() + errc := make(chan error, 1) + go func() { + fr, err := st.fr.ReadFrame() + if err == nil { + err = fmt.Errorf("got frame of type %T", fr) + } + errc <- err + }() + select { + case err := <-errc: + if err != io.EOF { + t.Errorf("ReadFrame = %v; want io.EOF", err) + } + case <-time.After(2 * time.Second): + t.Error("timeout waiting for disconnect") + } +} + +// testServerRejectsStream tests that the server sends a RST_STREAM with the provided +// error code after a client sends a bogus request. 
+func testServerRejectsStream(t *testing.T, code ErrCode, writeReq func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + defer st.Close() + st.greet() + writeReq(st) + st.wantRSTStream(1, code) +} + +// testServerRequest sets up an idle HTTP/2 connection and lets you +// write a single request with writeReq, and then verify that the +// *http.Request is built correctly in checkReq. +func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) { + gotReq := make(chan bool, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + t.Fatal("nil Body") + } + checkReq(r) + gotReq <- true + }) + defer st.Close() + + st.greet() + writeReq(st) + + select { + case <-gotReq: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for request") + } +} + +func getSlash(st *serverTester) { st.bodylessReq1() } + +func TestServer_Response_NoData(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + // Nothing. + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("want END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + }) +} + +func TestServer_Response_NoData_Header_FooBar(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Foo-Bar", "some-value") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("want END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo-bar", "some-value"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) { + const msg = "this is HTML." 
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Content-Type", "foo/bar") + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("don't want END_STREAM, expecting data") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "foo/bar"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + df := st.wantData() + if !df.StreamEnded() { + t.Error("expected DATA to have END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + }) +} + +func TestServer_Response_TransferEncoding_chunked(t *testing.T) { + const msg = "hi" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Transfer-Encoding", "chunked") // should be stripped + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +// Header accessed only after the initial write. +func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) { + const msg = "this is HTML." + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + w.Header().Set("foo", "should be ignored") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +// Header accessed before the initial write and later mutated. +func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) { + const msg = "this is HTML." + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("foo", "proper value") + io.WriteString(w, msg) + w.Header().Set("foo", "should be ignored") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo", "proper value"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +func TestServer_Response_Data_SniffLenType(t *testing.T) { + const msg = "this is HTML." 
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("don't want END_STREAM, expecting data") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + df := st.wantData() + if !df.StreamEnded() { + t.Error("expected DATA to have END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + }) +} + +func TestServer_Response_Header_Flush_MidWrite(t *testing.T) { + const msg = "this is HTML" + const msg2 = ", and this is the next chunk" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + io.WriteString(w, msg2) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, // sniffed + // and no content-length + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + { + df := st.wantData() + if df.StreamEnded() { + t.Error("unexpected END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + } + { + df := st.wantData() + if !df.StreamEnded() { + t.Error("wanted END_STREAM flag on last data chunk") + } + if got := string(df.Data()); got != msg2 { + t.Errorf("got DATA %q; want %q", got, msg2) + } + } + }) +} + +func TestServer_Response_LargeWrite(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + n, err := w.Write(bytes.Repeat([]byte("a"), size)) + if err != nil { + return fmt.Errorf("Write error: %v", err) + } + if n != size { + return fmt.Errorf("wrong size %d from Write", n) + } + return nil + }, func(st *serverTester) { + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, 0}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + // Give the handler quota to write: + if err := st.fr.WriteWindowUpdate(1, size); err != nil { + t.Fatal(err) + } + // Give the handler quota to write to connection-level + // window as well + if err := st.fr.WriteWindowUpdate(0, size); err != nil { + t.Fatal(err) + } + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, // sniffed + // and no content-length + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + var bytes, frames int + for { + df := st.wantData() + bytes += len(df.Data()) + frames++ + for _, b := range df.Data() { + if b != 'a' { + t.Fatal("non-'a' byte seen in 
DATA") + } + } + if df.StreamEnded() { + break + } + } + if bytes != size { + t.Errorf("Got %d bytes; want %d", bytes, size) + } + if want := int(size / maxFrameSize); frames < want || frames > want*2 { + t.Errorf("Got %d frames; want %d", frames, size) + } + }) +} + +// Test that the handler can't write more than the client allows +func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + n, err := w.Write(bytes.Repeat([]byte("a"), size)) + if err != nil { + return fmt.Errorf("Write error: %v", err) + } + if n != size { + return fmt.Errorf("wrong size %d from Write", n) + } + return nil + }, func(st *serverTester) { + // Set the window size to something explicit for this test. + // It's also how much initial data we expect. + const initWindowSize = 123 + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, initWindowSize}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + defer func() { st.fr.WriteRSTStream(1, ErrCodeCancel) }() + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + df := st.wantData() + if got := len(df.Data()); got != initWindowSize { + t.Fatalf("Initial window size = %d but got DATA with %d bytes", initWindowSize, got) + } + + for _, quota := range []int{1, 13, 127} { + if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil { + t.Fatal(err) + } + df := st.wantData() + if int(quota) != len(df.Data()) { + t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota) + } + } + + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }) +} + +// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM. 
+func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + errc := make(chan error, 1) + go func() { + _, err := w.Write(bytes.Repeat([]byte("a"), size)) + errc <- err + }() + select { + case err := <-errc: + if err == nil { + return errors.New("unexpected nil error from Write in handler") + } + return nil + case <-time.After(2 * time.Second): + return errors.New("timeout waiting for Write in handler") + } + }, func(st *serverTester) { + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, 0}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }) +} + +func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + // Nothing; send empty DATA + return nil + }, func(st *serverTester) { + // Handler gets no data quota: + if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + df := st.wantData() + if got := len(df.Data()); got != 0 { + t.Fatalf("unexpected %d DATA bytes; want 0", got) + } + if !df.StreamEnded() { + t.Fatal("DATA didn't have END_STREAM") + } + }) +} + +func TestServer_Response_Automatic100Continue(t *testing.T) { + const msg = "foo" + const reply = "bar" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + if v := r.Header.Get("Expect"); v != "" { + t.Errorf("Expect header = %q; want empty", v) + } + buf := make([]byte, len(msg)) + // This read should trigger the 100-continue being sent. + if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg { + return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg) + } + _, err := io.WriteString(w, reply) + return err + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "100"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Fatalf("Got headers %v; want %v", goth, wanth) + } + + // Okay, they sent status 100, so we can send our + // gigantic and/or sensitive "foo" payload now. 
+ st.writeData(1, true, []byte(msg)) + + st.wantWindowUpdate(0, uint32(len(msg))) + + hf = st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("expected data to follow") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth = st.decodeHeader(hf.HeaderBlockFragment()) + wanth = [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", strconv.Itoa(len(reply))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + + df := st.wantData() + if string(df.Data()) != reply { + t.Errorf("Client read %q; want %q", df.Data(), reply) + } + if !df.StreamEnded() { + t.Errorf("expect data stream end") + } + }) +} + +func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) { + errc := make(chan error, 1) + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + p := []byte("some data.\n") + for { + _, err := w.Write(p) + if err != nil { + errc <- err + return nil + } + } + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + // Close the connection and wait for the handler to (hopefully) notice. + st.cc.Close() + select { + case <-errc: + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_Too_Many_Streams(t *testing.T) { + const testPath = "/some/path" + + inHandler := make(chan uint32) + leaveHandler := make(chan bool) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + id := w.(*responseWriter).rws.stream.id + inHandler <- id + if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath { + t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath) + } + <-leaveHandler + }) + defer st.Close() + st.greet() + nextStreamID := uint32(1) + streamID := func() uint32 { + defer func() { nextStreamID += 2 }() + return nextStreamID + } + sendReq := func(id uint32, headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) + } + for i := 0; i < defaultMaxStreams; i++ { + sendReq(streamID()) + <-inHandler + } + defer func() { + for i := 0; i < defaultMaxStreams; i++ { + leaveHandler <- true + } + }() + + // And this one should cross the limit: + // (It's also sent as a CONTINUATION, to verify we still track the decoder context, + // even if we're rejecting it) + rejectID := streamID() + headerBlock := st.encodeHeader(":path", testPath) + frag1, frag2 := headerBlock[:3], headerBlock[3:] + st.writeHeaders(HeadersFrameParam{ + StreamID: rejectID, + BlockFragment: frag1, + EndStream: true, + EndHeaders: false, // CONTINUATION coming + }) + if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil { + t.Fatal(err) + } + st.wantRSTStream(rejectID, ErrCodeProtocol) + + // But let a handler finish: + leaveHandler <- true + st.wantHeaders() + + // And now another stream should be able to start: + goodID := streamID() + sendReq(goodID, ":path", testPath) + select { + case got := <-inHandler: + if got != goodID { + t.Errorf("Got stream %d; want %d", got, goodID) + } + case <-time.After(3 * time.Second): + t.Error("timeout waiting for handler") + } +} + +// So many response headers that the server needs to use CONTINUATION frames: +func 
TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + h := w.Header() + for i := 0; i < 5000; i++ { + h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i)) + } + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.HeadersEnded() { + t.Fatal("got unwanted END_HEADERS flag") + } + n := 0 + for { + n++ + cf := st.wantContinuation() + if cf.HeadersEnded() { + break + } + } + if n < 5 { + t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n) + } + }) +} + +// This previously crashed (reported by Mathieu Lonjaret as observed +// while using Camlistore) because we got a DATA frame from the client +// after the handler exited and our logic at the time was wrong, +// keeping a stream in the map in stateClosed, which tickled an +// invariant check later when we tried to remove that stream (via +// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop +// ended. +func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + // nothing + return nil + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: false, // DATA is coming + EndHeaders: true, + }) + hf := st.wantHeaders() + if !hf.HeadersEnded() || !hf.StreamEnded() { + t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf) + } + + // Sent when the a Handler closes while a client has + // indicated it's still sending DATA: + st.wantRSTStream(1, ErrCodeCancel) + + // Now the handler has ended, so it's ended its + // stream, but the client hasn't closed its side + // (stateClosedLocal). So send more data and verify + // it doesn't crash with an internal invariant panic, like + // it did before. + st.writeData(1, true, []byte("foo")) + + // Sent after a peer sends data anyway (admittedly the + // previous RST_STREAM might've still been in-flight), + // but they'll get the more friendly 'cancel' code + // first. + st.wantRSTStream(1, ErrCodeStreamClosed) + + // Set up a bunch of machinery to record the panic we saw + // previously. + var ( + panMu sync.Mutex + panicVal interface{} + ) + + testHookOnPanicMu.Lock() + testHookOnPanic = func(sc *serverConn, pv interface{}) bool { + panMu.Lock() + panicVal = pv + panMu.Unlock() + return true + } + testHookOnPanicMu.Unlock() + + // Now force the serve loop to end, via closing the connection. + st.cc.Close() + select { + case <-st.sc.doneServing: + // Loop has exited. 
+ panMu.Lock() + got := panicVal + panMu.Unlock() + if got != nil { + t.Errorf("Got panic: %v", got) + } + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) } +func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) } + +func testRejectTLS(t *testing.T, max uint16) { + st := newServerTester(t, nil, func(c *tls.Config) { + c.MaxVersion = max + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Rejects_TLSBadCipher(t *testing.T) { + st := newServerTester(t, nil, func(c *tls.Config) { + // Only list bad ones: + c.CipherSuites = []uint16{ + tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + } + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Advertises_Common_Cipher(t *testing.T) { + const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + st := newServerTester(t, nil, func(c *tls.Config) { + // Have the client only support the one required by the spec. + c.CipherSuites = []uint16{requiredSuite} + }, func(ts *httptest.Server) { + var srv *http.Server = ts.Config + // Have the server configured with no specific cipher suites. + // This tests that Go's defaults include the required one. 
+ srv.TLSConfig = nil + }) + defer st.Close() + st.greet() +} + +func (st *serverTester) onHeaderField(f hpack.HeaderField) { + if f.Name == "date" { + return + } + st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value}) +} + +func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) { + st.decodedHeaders = nil + if _, err := st.hpackDec.Write(headerBlock); err != nil { + st.t.Fatalf("hpack decoding error: %v", err) + } + if err := st.hpackDec.Close(); err != nil { + st.t.Fatalf("hpack decoding error: %v", err) + } + return st.decodedHeaders +} + +// testServerResponse sets up an idle HTTP/2 connection and lets you +// write a single request with writeReq, and then reply to it in some way with the provided handler, +// and then verify the output with the serverTester again (assuming the handler returns nil) +func testServerResponse(t testing.TB, + handler func(http.ResponseWriter, *http.Request) error, + client func(*serverTester), +) { + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + t.Fatal("nil Body") + } + errc <- handler(w, r) + }) + defer st.Close() + + donec := make(chan bool) + go func() { + defer close(donec) + st.greet() + client(st) + }() + + select { + case <-donec: + return + case <-time.After(5 * time.Second): + t.Fatal("timeout") + } + + select { + case err := <-errc: + if err != nil { + t.Fatalf("Error in handler: %v", err) + } + case <-time.After(2 * time.Second): + t.Error("timeout waiting for handler to finish") + } +} + +// readBodyHandler returns an http Handler func that reads len(want) +// bytes from r.Body and fails t if the contents read were not +// the value of want. +func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + buf := make([]byte, len(want)) + _, err := io.ReadFull(r.Body, buf) + if err != nil { + t.Error(err) + return + } + if string(buf) != want { + t.Errorf("read %q; want %q", buf, want) + } + } +} + +// TestServerWithCurl currently fails, hence the LenientCipherSuites test. 
See: +// https://github.com/tatsuhiro-t/nghttp2/issues/140 & +// http://sourceforge.net/p/curl/bugs/1472/ +func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) } +func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) } + +func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) { + if runtime.GOOS != "linux" { + t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") + } + if testing.Short() { + t.Skip("skipping curl test in short mode") + } + requireCurl(t) + var gotConn int32 + testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) } + + const msg = "Hello from curl!\n" + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + w.Header().Set("Client-Proto", r.Proto) + io.WriteString(w, msg) + })) + ConfigureServer(ts.Config, &Server{ + PermitProhibitedCipherSuites: permitProhibitedCipherSuites, + }) + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + ts.StartTLS() + defer ts.Close() + + t.Logf("Running test server for curl to hit at: %s", ts.URL) + container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL) + defer kill(container) + resc := make(chan interface{}, 1) + go func() { + res, err := dockerLogs(container) + if err != nil { + resc <- err + } else { + resc <- res + } + }() + select { + case res := <-resc: + if err, ok := res.(error); ok { + t.Fatal(err) + } + body := string(res.([]byte)) + // Search for both "key: value" and "key:value", since curl changed their format + // Our Dockerfile contains the latest version (no space), but just in case people + // didn't rebuild, check both. + if !strings.Contains(body, "foo: Bar") && !strings.Contains(body, "foo:Bar") { + t.Errorf("didn't see foo: Bar header") + t.Logf("Got: %s", body) + } + if !strings.Contains(body, "client-proto: HTTP/2") && !strings.Contains(body, "client-proto:HTTP/2") { + t.Errorf("didn't see client-proto: HTTP/2 header") + t.Logf("Got: %s", res) + } + if !strings.Contains(string(res.([]byte)), msg) { + t.Errorf("didn't see %q content", msg) + t.Logf("Got: %s", res) + } + case <-time.After(3 * time.Second): + t.Errorf("timeout waiting for curl") + } + + if atomic.LoadInt32(&gotConn) == 0 { + t.Error("never saw an http2 connection") + } +} + +var doh2load = flag.Bool("h2load", false, "Run h2load test") + +func TestServerWithH2Load(t *testing.T) { + if !*doh2load { + t.Skip("Skipping without --h2load flag.") + } + if runtime.GOOS != "linux" { + t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") + } + requireH2load(t) + + msg := strings.Repeat("Hello, h2load!\n", 5000) + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + io.WriteString(w, msg) + })) + ts.StartTLS() + defer ts.Close() + + cmd := exec.Command("docker", "run", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl", + "-n100000", "-c100", "-m100", ts.URL) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + t.Fatal(err) + } +} + +// Issue 12843 +func TestServerDoS_MaxHeaderListSize(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + defer st.Close() + + // shake hands + st.writePreface() + st.writeInitialSettings() + frameSize := defaultMaxReadFrameSize + var 
advHeaderListSize *uint32 + st.wantSettings().ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + if s.Val < minMaxFrameSize { + frameSize = minMaxFrameSize + } else if s.Val > maxFrameSize { + frameSize = maxFrameSize + } else { + frameSize = int(s.Val) + } + case SettingMaxHeaderListSize: + advHeaderListSize = &s.Val + } + return nil + }) + st.writeSettingsAck() + st.wantSettingsAck() + + if advHeaderListSize == nil { + t.Errorf("server didn't advertise a max header list size") + } else if *advHeaderListSize == 0 { + t.Errorf("server advertised a max header list size of 0") + } + + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + cookie := strings.Repeat("*", 4058) + st.encodeHeaderField("cookie", cookie) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.headerBuf.Bytes(), + EndStream: true, + EndHeaders: false, + }) + + // Capture the short encoding of a duplicate ~4K cookie, now + // that we've already sent it once. + st.headerBuf.Reset() + st.encodeHeaderField("cookie", cookie) + + // Now send 1MB of it. + const size = 1 << 20 + b := bytes.Repeat(st.headerBuf.Bytes(), size/st.headerBuf.Len()) + for len(b) > 0 { + chunk := b + if len(chunk) > frameSize { + chunk = chunk[:frameSize] + } + b = b[len(chunk):] + st.fr.WriteContinuation(1, len(b) == 0, chunk) + } + + h := st.wantHeaders() + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +func TestCompressionErrorOnWrite(t *testing.T) { + const maxStrLen = 8 << 10 + var serverConfig *http.Server + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }, func(ts *httptest.Server) { + serverConfig = ts.Config + serverConfig.MaxHeaderBytes = maxStrLen + }) + st.addLogFilter("connection error: COMPRESSION_ERROR") + defer st.Close() + st.greet() + + maxAllowed := st.sc.framer.maxHeaderStringLen() + + // Crank this up, now that we have a conn connected with the + // hpack.Decoder's max string length set has been initialized + // from the earlier low ~8K value. We want this higher so don't + // hit the max header list size. We only want to test hitting + // the max string size. + serverConfig.MaxHeaderBytes = 1 << 20 + + // First a request with a header that's exactly the max allowed size + // for the hpack compression. It's still too long for the header list + // size, so we'll get the 431 error, but that keeps the compression + // context still valid. 
+ hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed)) + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } + df := st.wantData() + if !strings.Contains(string(df.Data()), "HTTP Error 431") { + t.Errorf("Unexpected data body: %q", df.Data()) + } + if !df.StreamEnded() { + t.Fatalf("expect data stream end") + } + + // And now send one that's just one byte too big. + hbf = st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1)) + st.writeHeaders(HeadersFrameParam{ + StreamID: 3, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeCompression { + t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) + } +} + +func TestCompressionErrorOnClose(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }) + st.addLogFilter("connection error: COMPRESSION_ERROR") + defer st.Close() + st.greet() + + hbf := st.encodeHeader("foo", "bar") + hbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails. + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeCompression { + t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) + } +} + +// test that a server handler can read trailers from a client +func TestServerReadsTrailers(t *testing.T) { + const testBody = "some test body" + writeReq := func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("trailer", "Foo, Bar", "trailer", "Baz"), + EndStream: false, + EndHeaders: true, + }) + st.writeData(1, false, []byte(testBody)) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeaderRaw( + "foo", "foov", + "bar", "barv", + "baz", "bazv", + "surprise", "wasn't declared; shouldn't show up", + ), + EndStream: true, + EndHeaders: true, + }) + } + checkReq := func(r *http.Request) { + wantTrailer := http.Header{ + "Foo": nil, + "Bar": nil, + "Baz": nil, + } + if !reflect.DeepEqual(r.Trailer, wantTrailer) { + t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer) + } + slurp, err := ioutil.ReadAll(r.Body) + if string(slurp) != testBody { + t.Errorf("read body %q; want %q", slurp, testBody) + } + if err != nil { + t.Fatalf("Body slurp: %v", err) + } + wantTrailerAfter := http.Header{ + "Foo": {"foov"}, + "Bar": {"barv"}, + "Baz": {"bazv"}, + } + if !reflect.DeepEqual(r.Trailer, wantTrailerAfter) { + t.Errorf("final Trailer = %v; want %v", r.Trailer, wantTrailerAfter) + } + } + testServerRequest(t, writeReq, checkReq) +} + +// test that a server handler can send trailers +func TestServerWritesTrailers_WithFlush(t *testing.T) { testServerWritesTrailers(t, true) } +func TestServerWritesTrailers_WithoutFlush(t *testing.T) { testServerWritesTrailers(t, false) } + +func testServerWritesTrailers(t *testing.T, withFlush bool) { + // See 
https://httpwg.github.io/specs/rfc7540.html#rfc.section.8.1.3 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B") + w.Header().Add("Trailer", "Server-Trailer-C") + w.Header().Add("Trailer", "Transfer-Encoding, Content-Length, Trailer") // filtered + + // Regular headers: + w.Header().Set("Foo", "Bar") + w.Header().Set("Content-Length", "5") // len("Hello") + + io.WriteString(w, "Hello") + if withFlush { + w.(http.Flusher).Flush() + } + w.Header().Set("Server-Trailer-A", "valuea") + w.Header().Set("Server-Trailer-C", "valuec") // skipping B + // After a flush, random keys like Server-Surprise shouldn't show up: + w.Header().Set("Server-Surpise", "surprise! this isn't predeclared!") + // But we do permit promoting keys to trailers after a + // flush if they start with the magic + // otherwise-invalid "Trailer:" prefix: + w.Header().Set("Trailer:Post-Header-Trailer", "hi1") + w.Header().Set("Trailer:post-header-trailer2", "hi2") + w.Header().Set("Trailer:Range", "invalid") + w.Header().Set("Trailer:Foo\x01Bogus", "invalid") + w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 2616 14.40") + w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 2616 14.40") + w.Header().Set("Trailer", "should not be included; Forbidden by RFC 2616 14.40") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("response HEADERS had END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("response HEADERS didn't have END_HEADERS") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo", "Bar"}, + {"trailer", "Server-Trailer-A, Server-Trailer-B"}, + {"trailer", "Server-Trailer-C"}, + {"trailer", "Transfer-Encoding, Content-Length, Trailer"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", "5"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + df := st.wantData() + if string(df.Data()) != "Hello" { + t.Fatalf("Client read %q; want Hello", df.Data()) + } + if df.StreamEnded() { + t.Fatalf("data frame had STREAM_ENDED") + } + tf := st.wantHeaders() // for the trailers + if !tf.StreamEnded() { + t.Fatalf("trailers HEADERS lacked END_STREAM") + } + if !tf.HeadersEnded() { + t.Fatalf("trailers HEADERS lacked END_HEADERS") + } + wanth = [][2]string{ + {"post-header-trailer", "hi1"}, + {"post-header-trailer2", "hi2"}, + {"server-trailer-a", "valuea"}, + {"server-trailer-c", "valuec"}, + } + goth = st.decodeHeader(tf.HeaderBlockFragment()) + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + }) +} + +// validate transmitted header field names & values +// golang.org/issue/14048 +func TestServerDoesntWriteInvalidHeaders(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Add("OK1", "x") + w.Header().Add("Bad:Colon", "x") // colon (non-token byte) in key + w.Header().Add("Bad1\x00", "x") // null in key + w.Header().Add("Bad2", "x\x00y") // null in value + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Error("response HEADERS lacked END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("response HEADERS didn't have END_HEADERS") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", 
"200"}, + {"ok1", "x"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + }) +} + +func BenchmarkServerGets(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + + const msg = "Hello, world" + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + + // Give the server quota to reply. (plus it has the the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + id := 1 + uint32(i)*2 + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } + } +} + +func BenchmarkServerPosts(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + + const msg = "Hello, world" + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + + // Give the server quota to reply. (plus it has the the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + id := 1 + uint32(i)*2 + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + st.writeData(id, true, nil) + st.wantHeaders() + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } + } +} + +// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53 +// Verify we don't hang. 
+func TestIssue53(t *testing.T) { + const data = "PRI * HTTP/2.0\r\n\r\nSM" + + "\r\n\r\n\x00\x00\x00\x01\ainfinfin\ad" + s := &http.Server{ + ErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), "", log.LstdFlags), + Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("hello")) + }), + } + s2 := &Server{ + MaxReadFrameSize: 1 << 16, + PermitProhibitedCipherSuites: true, + } + c := &issue53Conn{[]byte(data), false, false} + s2.ServeConn(c, &ServeConnOpts{BaseConfig: s}) + if !c.closed { + t.Fatal("connection is not closed") + } +} + +type issue53Conn struct { + data []byte + closed bool + written bool +} + +func (c *issue53Conn) Read(b []byte) (n int, err error) { + if len(c.data) == 0 { + return 0, io.EOF + } + n = copy(b, c.data) + c.data = c.data[n:] + return +} + +func (c *issue53Conn) Write(b []byte) (n int, err error) { + c.written = true + return len(b), nil +} + +func (c *issue53Conn) Close() error { + c.closed = true + return nil +} + +func (c *issue53Conn) LocalAddr() net.Addr { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} +} +func (c *issue53Conn) RemoteAddr() net.Addr { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} +} +func (c *issue53Conn) SetDeadline(t time.Time) error { return nil } +func (c *issue53Conn) SetReadDeadline(t time.Time) error { return nil } +func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil } + +// golang.org/issue/12895 +func TestConfigureServer(t *testing.T) { + tests := []struct { + name string + in http.Server + wantErr string + }{ + { + name: "empty server", + in: http.Server{}, + }, + { + name: "just the required cipher suite", + in: http.Server{ + TLSConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + }, + }, + }, + { + name: "missing required cipher suite", + in: http.Server{ + TLSConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384}, + }, + }, + wantErr: "is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + }, + { + name: "required after bad", + in: http.Server{ + TLSConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + }, + }, + wantErr: "contains an HTTP/2-approved cipher suite (0xc02f), but it comes after", + }, + { + name: "bad after required", + in: http.Server{ + TLSConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA}, + }, + }, + }, + } + for _, tt := range tests { + err := ConfigureServer(&tt.in, nil) + if (err != nil) != (tt.wantErr != "") { + if tt.wantErr != "" { + t.Errorf("%s: success, but want error", tt.name) + } else { + t.Errorf("%s: unexpected error: %v", tt.name, err) + } + } + if err != nil && tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("%s: err = %v; want substring %q", tt.name, err, tt.wantErr) + } + if err == nil && !tt.in.TLSConfig.PreferServerCipherSuites { + t.Errorf("%s: PreferServerCipherSuite is false; want true", tt.name) + } + } +} + +func TestServerRejectHeadWithBody(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. 
+ }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "HEAD"), + EndStream: false, // what we're testing, a bogus HEAD request with body + EndHeaders: true, + }) + st.wantRSTStream(1, ErrCodeProtocol) +} + +func TestServerNoAutoContentLengthOnHead(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. (or smaller than one frame) + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "HEAD"), + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +// golang.org/issue/13495 +func TestServerNoDuplicateContentType(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.Header()["Content-Type"] = []string{""} + fmt.Fprintf(w, "hi") + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "200"}, + {"content-type", ""}, + {"content-length", "41"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +func disableGoroutineTracking() (restore func()) { + old := DebugGoroutines + DebugGoroutines = false + return func() { DebugGoroutines = old } +} + +func BenchmarkServer_GetRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + + st.greet() + // Give the server quota to reply. (plus it has the the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "GET") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + st.wantData() + } +} + +func BenchmarkServer_PostRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + // Give the server quota to reply. 
(plus it has the the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "POST") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: false, + EndHeaders: true, + }) + st.writeData(streamID, true, nil) + st.wantHeaders() + st.wantData() + } +} + +type connStateConn struct { + net.Conn + cs tls.ConnectionState +} + +func (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs } + +// golang.org/issue/12737 -- handle any net.Conn, not just +// *tls.Conn. +func TestServerHandleCustomConn(t *testing.T) { + var s Server + c1, c2 := net.Pipe() + clientDone := make(chan struct{}) + handlerDone := make(chan struct{}) + var req *http.Request + go func() { + defer close(clientDone) + defer c2.Close() + fr := NewFramer(c2, c2) + io.WriteString(c2, ClientPreface) + fr.WriteSettings() + fr.WriteSettingsAck() + f, err := fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() { + t.Errorf("Got %v; want non-ACK SettingsFrame", summarizeFrame(f)) + return + } + f, err = fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() { + t.Errorf("Got %v; want ACK SettingsFrame", summarizeFrame(f)) + return + } + var henc hpackEncoder + fr.WriteHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: henc.encodeHeaderRaw(t, ":method", "GET", ":path", "/", ":scheme", "https", ":authority", "foo.com"), + EndStream: true, + EndHeaders: true, + }) + go io.Copy(ioutil.Discard, c2) + <-handlerDone + }() + const testString = "my custom ConnectionState" + fakeConnState := tls.ConnectionState{ + ServerName: testString, + Version: tls.VersionTLS12, + } + go s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{ + BaseConfig: &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer close(handlerDone) + req = r + }), + }}) + select { + case <-clientDone: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for handler") + } + if req.TLS == nil { + t.Fatalf("Request.TLS is nil. 
Got: %#v", req) + } + if req.TLS.ServerName != testString { + t.Fatalf("Request.TLS = %+v; want ServerName of %q", req.TLS, testString) + } +} + +// golang.org/issue/14214 +func TestServer_Rejects_ConnHeaders(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + t.Errorf("should not get to Handler") + return nil + }, func(st *serverTester) { + st.bodylessReq1("connection", "foo") + hf := st.wantHeaders() + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "400"}, + {"content-type", "text/plain; charset=utf-8"}, + {"x-content-type-options", "nosniff"}, + {"content-length", "51"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +type hpackEncoder struct { + enc *hpack.Encoder + buf bytes.Buffer +} + +func (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + he.buf.Reset() + if he.enc == nil { + he.enc = hpack.NewEncoder(&he.buf) + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + err := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } + headers = headers[2:] + } + return he.buf.Bytes() +} + +func TestCheckValidHTTP2Request(t *testing.T) { + tests := []struct { + req *http.Request + want error + }{ + { + req: &http.Request{Header: http.Header{"Te": {"trailers"}}}, + want: nil, + }, + { + req: &http.Request{Header: http.Header{"Te": {"trailers", "bogus"}}}, + want: errors.New(`request header "TE" may only be "trailers" in HTTP/2`), + }, + { + req: &http.Request{Header: http.Header{"Foo": {""}}}, + want: nil, + }, + { + req: &http.Request{Header: http.Header{"Connection": {""}}}, + want: errors.New(`request header "Connection" is not valid in HTTP/2`), + }, + { + req: &http.Request{Header: http.Header{"Proxy-Connection": {""}}}, + want: errors.New(`request header "Proxy-Connection" is not valid in HTTP/2`), + }, + { + req: &http.Request{Header: http.Header{"Keep-Alive": {""}}}, + want: errors.New(`request header "Keep-Alive" is not valid in HTTP/2`), + }, + { + req: &http.Request{Header: http.Header{"Upgrade": {""}}}, + want: errors.New(`request header "Upgrade" is not valid in HTTP/2`), + }, + } + for i, tt := range tests { + got := checkValidHTTP2Request(tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. checkValidHTTP2Request = %v; want %v", i, got, tt.want) + } + } +} + +// golang.org/issue/14030 +func TestExpect100ContinueAfterHandlerWrites(t *testing.T) { + const msg = "Hello" + const msg2 = "World" + + doRead := make(chan bool, 1) + defer close(doRead) // fallback cleanup + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + + // Do a read, which might force a 100-continue status to be sent. 
+ <-doRead + r.Body.Read(make([]byte, 10)) + + io.WriteString(w, msg2) + + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20)) + req.Header.Set("Expect", "100-continue") + + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + buf := make([]byte, len(msg)) + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg { + t.Fatalf("msg = %q; want %q", buf, msg) + } + + doRead <- true + + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg2 { + t.Fatalf("second msg = %q; want %q", buf, msg2) + } +} diff --git a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml new file mode 100644 index 00000000000..03399f4ec59 --- /dev/null +++ b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml @@ -0,0 +1,5021 @@ + + + + + + + + + + + + + + + + + + + Hypertext Transfer Protocol version 2 + + + Twist +
    + mbelshe@chromium.org
    + Google, Inc
    + fenix@google.com
    + Mozilla
    + 331 E Evelyn Street, Mountain View, CA 94041, US
    + martin.thomson@gmail.com
    + + + Applications + HTTPbis + HTTP + SPDY + Web + + + + This specification describes an optimized expression of the semantics of the Hypertext + Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a + reduced perception of latency by introducing header field compression and allowing multiple + concurrent messages on the same connection. It also introduces unsolicited push of + representations from servers to clients. + + + This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax. + HTTP's existing semantics remain unchanged. + + + + + + Discussion of this draft takes place on the HTTPBIS working group mailing list + (ietf-http-wg@w3.org), which is archived at . + + + Working Group information can be found at ; that specific to HTTP/2 are at . + + + The changes in this draft are summarized in . + + + +
    + + +
    + + + The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the + HTTP/1.1 message format () has + several characteristics that have a negative overall effect on application performance + today. + + + In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given + TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed + request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1 + clients that need to make many requests typically use multiple connections to a server in + order to achieve concurrency and thereby reduce latency. + + + Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary + network traffic, as well as causing the initial TCP congestion + window to quickly fill. This can result in excessive latency when multiple requests are + made on a new TCP connection. + + + HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an + underlying connection. Specifically, it allows interleaving of request and response + messages on the same connection and uses an efficient coding for HTTP header fields. It + also allows prioritization of requests, letting more important requests complete more + quickly, further improving performance. + + + The resulting protocol is more friendly to the network, because fewer TCP connections can + be used in comparison to HTTP/1.x. This means less competition with other flows, and + longer-lived connections, which in turn leads to better utilization of available network + capacity. + + + Finally, HTTP/2 also enables more efficient processing of messages through use of binary + message framing. + +
    + +
    + + HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core + features of HTTP/1.1, but aims to be more efficient in several ways. + + + The basic protocol unit in HTTP/2 is a frame. Each frame + type serves a different purpose. For example, HEADERS and + DATA frames form the basis of HTTP requests and + responses; other frame types like SETTINGS, + WINDOW_UPDATE, and PUSH_PROMISE are used in support of other + HTTP/2 features. + + + Multiplexing of requests is achieved by having each HTTP request-response exchange + associated with its own stream. Streams are largely + independent of each other, so a blocked or stalled request or response does not prevent + progress on other streams. + + + Flow control and prioritization ensure that it is possible to efficiently use multiplexed + streams. Flow control helps to ensure that only data that + can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed + to the most important streams first. + + + HTTP/2 adds a new interaction mode, whereby a server can push + responses to a client. Server push allows a server to speculatively send a client + data that the server anticipates the client will need, trading off some network usage + against a potential latency gain. The server does this by synthesizing a request, which it + sends as a PUSH_PROMISE frame. The server is then able to send a response to + the synthetic request on a separate stream. + + + Frames that contain HTTP header fields are compressed. + HTTP requests can be highly redundant, so compression can reduce the size of requests and + responses significantly. + + +
    + + The HTTP/2 specification is split into four parts: + + + Starting HTTP/2 covers how an HTTP/2 connection is + initiated. + + + The framing and streams layers describe the way HTTP/2 frames are + structured and formed into multiplexed streams. + + + Frame and error + definitions include details of the frame and error types used in HTTP/2. + + + HTTP mappings and additional + requirements describe how HTTP semantics are expressed using frames and + streams. + + + + + While some of the frame and stream layer concepts are isolated from HTTP, this + specification does not define a completely generic framing layer. The framing and streams + layers are tailored to the needs of the HTTP protocol and server push. + +
    + +
    + + The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD + NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as + described in RFC 2119. + + + All numeric values are in network byte order. Values are unsigned unless otherwise + indicated. Literal values are provided in decimal or hexadecimal as appropriate. + Hexadecimal literals are prefixed with 0x to distinguish them + from decimal literals. + + + The following terms are used: + + + The endpoint initiating the HTTP/2 connection. + + + A transport-layer connection between two endpoints. + + + An error that affects the entire HTTP/2 connection. + + + Either the client or server of the connection. + + + The smallest unit of communication within an HTTP/2 connection, consisting of a header + and a variable-length sequence of octets structured according to the frame type. + + + An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint + that is remote to the primary subject of discussion. + + + An endpoint that is receiving frames. + + + An endpoint that is transmitting frames. + + + The endpoint which did not initiate the HTTP/2 connection. + + + A bi-directional flow of frames across a virtual channel within the HTTP/2 connection. + + + An error on the individual HTTP/2 stream. + + + + + Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined + in . + +
    +
    + +
    + + An HTTP/2 connection is an application layer protocol running on top of a TCP connection + (). The client is the TCP connection initiator. + + + HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same + default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result, + implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the + upstream server (the immediate peer to which the client wishes to establish a connection) + supports HTTP/2. + + + + The means by which support for HTTP/2 is determined is different for "http" and "https" + URIs. Discovery for "http" URIs is described in . Discovery + for "https" URIs is described in . + + +
    + + The protocol defined in this document has two identifiers. + + + + The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN) + field and any place that HTTP/2 over TLS is identified. + + + The "h2" string is serialized into an ALPN protocol identifier as the two octet + sequence: 0x68, 0x32. + + + + + The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP. + This identifier is used in the HTTP/1.1 Upgrade header field and any place that + HTTP/2 over TCP is identified. + + + + + + Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message + semantics described in this document. + + + RFC Editor's Note: please remove the remainder of this section prior to the + publication of a final version of this document. + + + Only implementations of the final, published RFC can identify themselves as "h2" or "h2c". + Until such an RFC exists, implementations MUST NOT identify themselves using these + strings. + + + Examples and text throughout the rest of this document use "h2" as a matter of + editorial convenience only. Implementations of draft versions MUST NOT identify using + this string. + + + Implementations of draft versions of the protocol MUST add the string "-" and the + corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11 + over TLS is identified using the string "h2-11". + + + Non-compatible experiments that are based on these draft versions MUST append the string + "-" and an experiment name to the identifier. For example, an experimental implementation + of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself + as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in + . Experimenters are + encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list. + +
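(Editor's note: as a rough illustration of the "h2" ALPN identifier described above, the following Go sketch advertises it during the TLS handshake with crypto/tls. The certificate file names are placeholders; this is roughly what the vendored package's ConfigureServer arranges for an http.Server, not a definitive implementation.)

package main

import (
    "crypto/tls"
    "log"
)

func main() {
    // Placeholder certificate/key paths for illustration only.
    cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
    if err != nil {
        log.Fatal(err)
    }
    cfg := &tls.Config{
        Certificates: []tls.Certificate{cert},
        // ALPN: offer "h2" first, falling back to HTTP/1.1.
        NextProtos: []string{"h2", "http/1.1"},
    }
    ln, err := tls.Listen("tcp", ":443", cfg)
    if err != nil {
        log.Fatal(err)
    }
    conn, err := ln.Accept()
    if err != nil {
        log.Fatal(err)
    }
    tlsConn := conn.(*tls.Conn)
    if err := tlsConn.Handshake(); err != nil {
        log.Fatal(err)
    }
    // NegotiatedProtocol reports what ALPN selected; "h2" means HTTP/2 over TLS.
    log.Printf("negotiated protocol: %q", tlsConn.ConnectionState().NegotiatedProtocol)
}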
    + +
    + + A client that makes a request for an "http" URI without prior knowledge about support for + HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade + header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include + exactly one HTTP2-Settings header field. + +
    + For example:
    +
    + GET / HTTP/1.1
    + Host: server.example.com
    + Connection: Upgrade, HTTP2-Settings
    + Upgrade: h2c
    + HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>
    +
    + + Requests that contain an entity body MUST be sent in their entirety before the client can + send HTTP/2 frames. This means that a large request entity can block the use of the + connection until it is completely sent. + + + If concurrency of an initial request with subsequent requests is important, an OPTIONS + request can be used to perform the upgrade to HTTP/2, at the cost of an additional + round-trip. + + + A server that does not support HTTP/2 can respond to the request as though the Upgrade + header field were absent: + +
    + +HTTP/1.1 200 OK +Content-Length: 243 +Content-Type: text/html + +... + +
    + + A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with + "h2" implies HTTP/2 over TLS, which is instead negotiated as described in . + + + A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols) + response. After the empty line that terminates the 101 response, the server can begin + sending HTTP/2 frames. These frames MUST include a response to the request that initiated + the Upgrade. + + +
    + + For example: + + +HTTP/1.1 101 Switching Protocols +Connection: Upgrade +Upgrade: h2c + +[ HTTP/2 connection ... + +
    + + The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a + SETTINGS frame. + + + The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is + assigned default priority values. Stream 1 is + implicitly half closed from the client toward the server, since the request is completed + as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the + response. + + +
    + + A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field + that includes parameters that govern the HTTP/2 connection, provided in anticipation of + the server accepting the request to upgrade. + +
    + +
    + + A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present, + or if more than one is present. A server MUST NOT send this header field. + + + + The content of the HTTP2-Settings header field is the + payload of a SETTINGS frame (), encoded as a + base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The + ABNF production for token68 is + defined in . + + + Since the upgrade is only intended to apply to the immediate connection, a client + sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded + downstream. + + + A server decodes and interprets these values as it would any other + SETTINGS frame. Acknowledgement of the + SETTINGS parameters is not necessary, since a 101 response serves as implicit + acknowledgment. Providing these values in the Upgrade request gives a client an + opportunity to provide parameters prior to receiving any frames from the server. + +
    +
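(Editor's note: a minimal Go sketch of building an HTTP2-Settings token as described above, assuming the SETTINGS payload layout of 16-bit identifier / 32-bit value pairs defined for the SETTINGS frame; the setting identifiers used are illustrative. base64.RawURLEncoding is the URL- and filename-safe alphabet with trailing '=' omitted.)

package main

import (
    "encoding/base64"
    "encoding/binary"
    "fmt"
)

type setting struct {
    ID  uint16
    Val uint32
}

func http2SettingsToken(settings []setting) string {
    payload := make([]byte, 0, 6*len(settings))
    for _, s := range settings {
        var buf [6]byte
        binary.BigEndian.PutUint16(buf[0:2], s.ID)  // 16-bit setting identifier
        binary.BigEndian.PutUint32(buf[2:6], s.Val) // 32-bit setting value
        payload = append(payload, buf[:]...)
    }
    // base64url with no '=' padding, per the HTTP2-Settings definition above.
    return base64.RawURLEncoding.EncodeToString(payload)
}

func main() {
    // 0x3 = SETTINGS_MAX_CONCURRENT_STREAMS, 0x4 = SETTINGS_INITIAL_WINDOW_SIZE (illustrative values).
    fmt.Println("HTTP2-Settings:", http2SettingsToken([]setting{{0x3, 100}, {0x4, 1 << 16}}))
}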
    + +
    + + A client that makes a request to an "https" URI uses TLS + with the application layer protocol negotiation extension. + + + HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a + client or selected by a server. + + + Once TLS negotiation is complete, both the client and the server send a connection preface. + +
    + +
    + + A client can learn that a particular server supports HTTP/2 by other means. For example, + describes a mechanism for advertising this capability. + + + A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2, + after the connection preface; a server can + identify such a connection by the presence of the connection preface. This only affects + the establishment of HTTP/2 connections over cleartext TCP; implementations that support + HTTP/2 over TLS MUST use protocol negotiation in TLS. + + + Without additional information, prior support for HTTP/2 is not a strong signal that a + given server will support HTTP/2 for future connections. For example, it is possible for + server configurations to change, for configurations to differ between instances in + clustered servers, or for network conditions to change. + +
    + +
    + + Upon establishment of a TCP connection and determination that HTTP/2 will be used by both + peers, each endpoint MUST send a connection preface as a final confirmation and to + establish the initial SETTINGS parameters for the HTTP/2 connection. The client and + server each send a different connection preface. + + + The client connection preface starts with a sequence of 24 octets, which in hex notation + are: + +
    + 0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
    + + (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence + is followed by a SETTINGS frame (). The + SETTINGS frame MAY be empty. The client sends the client connection + preface immediately upon receipt of a 101 Switching Protocols response (indicating a + successful upgrade), or as the first application data octets of a TLS connection. If + starting an HTTP/2 connection with prior knowledge of server support for the protocol, the + client connection preface is sent upon connection establishment. + + + + + The client connection preface is selected so that a large proportion of HTTP/1.1 or + HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note + that this does not address the concerns raised in . + + + + + The server connection preface consists of a potentially empty SETTINGS + frame () that MUST be the first frame the server sends in the + HTTP/2 connection. + + + The SETTINGS frames received from a peer as part of the connection preface + MUST be acknowledged (see ) after sending the connection + preface. + + + To avoid unnecessary latency, clients are permitted to send additional frames to the + server immediately after sending the client connection preface, without waiting to receive + the server connection preface. It is important to note, however, that the server + connection preface SETTINGS frame might include parameters that necessarily + alter how a client is expected to communicate with the server. Upon receiving the + SETTINGS frame, the client is expected to honor any parameters established. + In some configurations, it is possible for the server to transmit SETTINGS + before the client sends additional frames, providing an opportunity to avoid this issue. + + + Clients and servers MUST treat an invalid connection preface as a connection error of type + PROTOCOL_ERROR. A GOAWAY frame () + MAY be omitted in this case, since an invalid preface indicates that the peer is not using + HTTP/2. + +
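(Editor's note: a minimal sketch of a cleartext client sending the connection preface followed by a SETTINGS frame, using the ClientPreface constant and Framer that the vendored package exports and that the TestServerHandleCustomConn client above relies on. The helper and package names here are illustrative.)

package h2demo

import (
    "io"
    "net"

    "golang.org/x/net/http2"
)

// startH2C writes the 24-octet client preface ("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n")
// and the SETTINGS frame that must follow it, then returns a Framer for the
// rest of the connection.
func startH2C(conn net.Conn) (*http2.Framer, error) {
    if _, err := io.WriteString(conn, http2.ClientPreface); err != nil {
        return nil, err
    }
    fr := http2.NewFramer(conn, conn) // writer, reader
    // The preface is followed by a (possibly empty) SETTINGS frame.
    if err := fr.WriteSettings(); err != nil {
        return nil, err
    }
    // The server replies with its own SETTINGS, which the client must acknowledge.
    return fr, nil
}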
    +
    + +
    + + Once the HTTP/2 connection is established, endpoints can begin exchanging frames. + + +
    + + All frames begin with a fixed 9-octet header followed by a variable-length payload. + +
    +  +-----------------------------------------------+
    +  |                 Length (24)                   |
    +  +---------------+---------------+---------------+
    +  |   Type (8)    |   Flags (8)   |
    +  +-+-------------+---------------+-------------------------------+
    +  |R|                 Stream Identifier (31)                      |
    +  +=+=============================================================+
    +  |                   Frame Payload (0...)                      ...
    +  +---------------------------------------------------------------+
    +
    +  Frame Layout
    + + The fields of the frame header are defined as: + + + + The length of the frame payload expressed as an unsigned 24-bit integer. Values + greater than 214 (16,384) MUST NOT be sent unless the receiver has + set a larger value for SETTINGS_MAX_FRAME_SIZE. + + + The 9 octets of the frame header are not included in this value. + + + + + The 8-bit type of the frame. The frame type determines the format and semantics of + the frame. Implementations MUST ignore and discard any frame that has a type that + is unknown. + + + + + An 8-bit field reserved for frame-type specific boolean flags. + + + Flags are assigned semantics specific to the indicated frame type. Flags that have + no defined semantics for a particular frame type MUST be ignored, and MUST be left + unset (0) when sending. + + + + + A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST + remain unset (0) when sending and MUST be ignored when receiving. + + + + + A 31-bit stream identifier (see ). The value 0 is + reserved for frames that are associated with the connection as a whole as opposed to + an individual stream. + + + + + + The structure and content of the frame payload is dependent entirely on the frame type. + +
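(Editor's note: a minimal Go sketch of decoding the fixed 9-octet frame header just described — 24-bit length, 8-bit type, 8-bit flags, one reserved bit, 31-bit stream identifier. The vendored Framer does this internally; the type and function names here are illustrative.)

package h2demo

import "encoding/binary"

// frameHeader mirrors the fixed 9-octet header described above.
type frameHeader struct {
    Length   uint32 // 24-bit payload length; the 9 header octets are not counted
    Type     uint8  // frame type; frames of unknown type are ignored and discarded
    Flags    uint8  // type-specific boolean flags
    StreamID uint32 // 31-bit stream identifier; 0 addresses the connection as a whole
}

// parseFrameHeader decodes one frame header from a 9-byte buffer.
func parseFrameHeader(buf [9]byte) frameHeader {
    return frameHeader{
        Length: uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2]),
        Type:   buf[3],
        Flags:  buf[4],
        // The high bit is the reserved R field and must be ignored when receiving.
        StreamID: binary.BigEndian.Uint32(buf[5:9]) &^ (1 << 31),
    }
}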
    + +
    + + The size of a frame payload is limited by the maximum size that a receiver advertises in + the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value + between 214 (16,384) and 224-1 (16,777,215) octets, + inclusive. + + + All implementations MUST be capable of receiving and minimally processing frames up to + 214 octets in length, plus the 9 octet frame + header. The size of the frame header is not included when describing frame sizes. + + + Certain frame types, such as PING, impose additional limits + on the amount of payload data allowed. + + + + + If a frame size exceeds any defined limit, or is too small to contain mandatory frame + data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error + in a frame that could alter the state of the entire connection MUST be treated as a connection error; this includes any frame carrying + a header block (that is, HEADERS, + PUSH_PROMISE, and CONTINUATION), SETTINGS, + and any WINDOW_UPDATE frame with a stream identifier of 0. + + + Endpoints are not obligated to use all available space in a frame. Responsiveness can be + improved by using frames that are smaller than the permitted maximum size. Sending large + frames can result in delays in sending time-sensitive frames (such + RST_STREAM, WINDOW_UPDATE, or PRIORITY) + which if blocked by the transmission of a large frame, could affect performance. + +
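(Editor's note: a small sketch, under the limits stated above, of validating an advertised SETTINGS_MAX_FRAME_SIZE and an incoming frame length against it. The constant and function names are illustrative; the vendored code performs equivalent checks when reading frames.)

package h2demo

import "errors"

const (
    minMaxFrameSize = 1 << 14   // every implementation must accept frames up to this size
    maxMaxFrameSize = 1<<24 - 1 // largest value SETTINGS_MAX_FRAME_SIZE may take
)

var errFrameSize = errors.New("FRAME_SIZE_ERROR: frame exceeds permitted maximum")

// checkFrameLength reports whether a frame payload length is acceptable given
// the peer's advertised SETTINGS_MAX_FRAME_SIZE.
func checkFrameLength(length, advertisedMax uint32) error {
    max := advertisedMax
    if max < minMaxFrameSize || max > maxMaxFrameSize {
        // An out-of-range advertisement is itself invalid; fall back to the
        // minimum that every peer must support.
        max = minMaxFrameSize
    }
    if length > max {
        return errFrameSize
    }
    return nil
}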
    + +
    + + Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values. + They are used within HTTP request and response messages as well as server push operations + (see ). + + + Header lists are collections of zero or more header fields. When transmitted over a + connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then + divided into one or more octet sequences, called header block fragments, and transmitted + within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames. + + + The Cookie header field is treated specially by the HTTP + mapping (see ). + + + A receiving endpoint reassembles the header block by concatenating its fragments, then + decompresses the block to reconstruct the header list. + + + A complete header block consists of either: + + + a single HEADERS or PUSH_PROMISE frame, + with the END_HEADERS flag set, or + + + a HEADERS or PUSH_PROMISE frame with the END_HEADERS + flag cleared and one or more CONTINUATION frames, + where the last CONTINUATION frame has the END_HEADERS flag set. + + + + + Header compression is stateful. One compression context and one decompression context is + used for the entire connection. Each header block is processed as a discrete unit. + Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved + frames of any other type or from any other stream. The last frame in a sequence of + HEADERS or CONTINUATION frames MUST have the END_HEADERS + flag set. The last frame in a sequence of PUSH_PROMISE or + CONTINUATION frames MUST have the END_HEADERS flag set. This allows a + header block to be logically equivalent to a single frame. + + + Header block fragments can only be sent as the payload of HEADERS, + PUSH_PROMISE or CONTINUATION frames, because these frames + carry data that can modify the compression context maintained by a receiver. An endpoint + receiving HEADERS, PUSH_PROMISE or + CONTINUATION frames MUST reassemble header blocks and perform decompression + even if the frames are to be discarded. A receiver MUST terminate the connection with a + connection error of type + COMPRESSION_ERROR if it does not decompress a header block. + +
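+
+ A non-normative Go sketch of header block reassembly; the headerFrame struct is an illustrative
+ stand-in for a parsed HEADERS, PUSH_PROMISE or CONTINUATION frame, and a real implementation
+ would still feed the reassembled block to its HPACK decoder even when the frames are to be
+ discarded.
+
+     package h2sketch
+
+     import "bytes"
+
+     type headerFrame struct {
+         Fragment   []byte
+         EndHeaders bool
+     }
+
+     // reassemble concatenates contiguous fragments until a frame carrying the
+     // END_HEADERS flag is seen; it reports whether the block is complete.
+     func reassemble(frames []headerFrame) ([]byte, bool) {
+         var block bytes.Buffer
+         for _, f := range frames {
+             block.Write(f.Fragment)
+             if f.EndHeaders {
+                 return block.Bytes(), true
+             }
+         }
+         return nil, false
+     }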
    +
    + +
    + + A "stream" is an independent, bi-directional sequence of frames exchanged between the client + and server within an HTTP/2 connection. Streams have several important characteristics: + + + A single HTTP/2 connection can contain multiple concurrently open streams, with either + endpoint interleaving frames from multiple streams. + + + Streams can be established and used unilaterally or shared by either the client or + server. + + + Streams can be closed by either endpoint. + + + The order in which frames are sent on a stream is significant. Recipients process frames + in the order they are received. In particular, the order of HEADERS, + and DATA frames is semantically significant. + + + Streams are identified by an integer. Stream identifiers are assigned to streams by the + endpoint initiating the stream. + + + + +
    + + The lifecycle of a stream is shown in . + + +
+ [Figure: diagram of stream states and the transitions between them. Streams move among the
+ "idle", "reserved (local)", "reserved (remote)", "open", "half closed (local)",
+ "half closed (remote)", and "closed" states, driven by the frames and flags listed below.]
+
+    H:  HEADERS frame (with implied CONTINUATIONs)
+    PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
+    ES: END_STREAM flag
+    R:  RST_STREAM frame
+]]>
    + + + Note that this diagram shows stream state transitions and the frames and flags that affect + those transitions only. In this regard, CONTINUATION frames do not result + in state transitions; they are effectively part of the HEADERS or + PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is + processed as a separate event to the frame that bears it; a HEADERS frame + with the END_STREAM flag set can cause two state transitions. + + + Both endpoints have a subjective view of the state of a stream that could be different + when frames are in transit. Endpoints do not coordinate the creation of streams; they are + created unilaterally by either endpoint. The negative consequences of a mismatch in + states are limited to the "closed" state after sending RST_STREAM, where + frames might be received for some time after closing. + + + Streams have the following states: + + + + + + All streams start in the "idle" state. In this state, no frames have been + exchanged. + + + The following transitions are valid from this state: + + + Sending or receiving a HEADERS frame causes the stream to become + "open". The stream identifier is selected as described in . The same HEADERS frame can also + cause a stream to immediately become "half closed". + + + Sending a PUSH_PROMISE frame marks the associated stream for + later use. The stream state for the reserved stream transitions to "reserved + (local)". + + + Receiving a PUSH_PROMISE frame marks the associated stream as + reserved by the remote peer. The state of the stream becomes "reserved + (remote)". + + + + + Receiving any frames other than HEADERS or + PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (local)" state is one that has been promised by sending a + PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an + idle stream by associating the stream with an open stream that was initiated by the + remote peer (see ). + + + In this state, only the following transitions are possible: + + + The endpoint can send a HEADERS frame. This causes the stream to + open in a "half closed (remote)" state. + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MUST NOT send any type of frame other than HEADERS or + RST_STREAM in this state. + + + A PRIORITY frame MAY be received in this state. Receiving any type + of frame other than RST_STREAM or PRIORITY on a stream + in this state MUST be treated as a connection + error of type PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (remote)" state has been reserved by a remote peer. + + + In this state, only the following transitions are possible: + + + Receiving a HEADERS frame causes the stream to transition to + "half closed (local)". + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MAY send a PRIORITY frame in this state to reprioritize + the reserved stream. An endpoint MUST NOT send any type of frame other than + RST_STREAM, WINDOW_UPDATE, or PRIORITY + in this state. + + + Receiving any type of frame other than HEADERS or + RST_STREAM on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "open" state may be used by both peers to send frames of any type. 
+ In this state, sending peers observe advertised stream + level flow control limits. + + + From this state either endpoint can send a frame with an END_STREAM flag set, which + causes the stream to transition into one of the "half closed" states: an endpoint + sending an END_STREAM flag causes the stream state to become "half closed (local)"; + an endpoint receiving an END_STREAM flag causes the stream state to become "half + closed (remote)". + + + Either endpoint can send a RST_STREAM frame from this state, causing + it to transition immediately to "closed". + + + + + + + A stream that is in the "half closed (local)" state cannot be used for sending + frames. Only WINDOW_UPDATE, PRIORITY and + RST_STREAM frames can be sent in this state. + + + A stream transitions from this state to "closed" when a frame that contains an + END_STREAM flag is received, or when either peer sends a RST_STREAM + frame. + + + A receiver can ignore WINDOW_UPDATE frames in this state, which might + arrive for a short period after a frame bearing the END_STREAM flag is sent. + + + PRIORITY frames received in this state are used to reprioritize + streams that depend on the current stream. + + + + + + + A stream that is "half closed (remote)" is no longer being used by the peer to send + frames. In this state, an endpoint is no longer obligated to maintain a receiver + flow control window if it performs flow control. + + + If an endpoint receives additional frames for a stream that is in this state, other + than WINDOW_UPDATE, PRIORITY or + RST_STREAM, it MUST respond with a stream error of type + STREAM_CLOSED. + + + A stream that is "half closed (remote)" can be used by the endpoint to send frames + of any type. In this state, the endpoint continues to observe advertised stream level flow control limits. + + + A stream can transition from this state to "closed" by sending a frame that contains + an END_STREAM flag, or when either peer sends a RST_STREAM frame. + + + + + + + The "closed" state is the terminal state. + + + An endpoint MUST NOT send frames other than PRIORITY on a closed + stream. An endpoint that receives any frame other than PRIORITY + after receiving a RST_STREAM MUST treat that as a stream error of type + STREAM_CLOSED. Similarly, an endpoint that receives any frames after + receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type + STREAM_CLOSED, unless the frame is permitted as described below. + + + WINDOW_UPDATE or RST_STREAM frames can be received in + this state for a short period after a DATA or HEADERS + frame containing an END_STREAM flag is sent. Until the remote peer receives and + processes RST_STREAM or the frame bearing the END_STREAM flag, it + might send frames of these types. Endpoints MUST ignore + WINDOW_UPDATE or RST_STREAM frames received in this + state, though endpoints MAY choose to treat frames that arrive a significant time + after sending END_STREAM as a connection + error of type PROTOCOL_ERROR. + + + PRIORITY frames can be sent on closed streams to prioritize streams + that are dependent on the closed stream. Endpoints SHOULD process + PRIORITY frame, though they can be ignored if the stream has been + removed from the dependency tree (see ). + + + If this state is reached as a result of sending a RST_STREAM frame, + the peer that receives the RST_STREAM might have already sent - or + enqueued for sending - frames on the stream that cannot be withdrawn. 
An endpoint + MUST ignore frames that it receives on closed streams after it has sent a + RST_STREAM frame. An endpoint MAY choose to limit the period over + which it ignores frames and treat frames that arrive after this time as being in + error. + + + Flow controlled frames (i.e., DATA) received after sending + RST_STREAM are counted toward the connection flow control window. + Even though these frames might be ignored, because they are sent before the sender + receives the RST_STREAM, the sender will consider the frames to count + against the flow control window. + + + An endpoint might receive a PUSH_PROMISE frame after it sends + RST_STREAM. PUSH_PROMISE causes a stream to become + "reserved" even if the associated stream has been reset. Therefore, a + RST_STREAM is needed to close an unwanted promised stream. + + + + + + In the absence of more specific guidance elsewhere in this document, implementations + SHOULD treat the receipt of a frame that is not expressly permitted in the description of + a state as a connection error of type + PROTOCOL_ERROR. Frames of unknown types are ignored. + + + An example of the state transitions for an HTTP request/response exchange can be found in + . An example of the state transitions for server push can be + found in and . + + +
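+
+ A non-normative Go sketch of the states listed above and two representative transitions
+ (receipt of the END_STREAM flag and of RST_STREAM); it deliberately omits most of the rules in
+ this section and uses illustrative names.
+
+     package h2sketch
+
+     type streamState int
+
+     const (
+         stateIdle streamState = iota
+         stateReservedLocal
+         stateReservedRemote
+         stateOpen
+         stateHalfClosedLocal
+         stateHalfClosedRemote
+         stateClosed
+     )
+
+     // onRecvEndStream applies the receiver-side END_STREAM transition.
+     func onRecvEndStream(s streamState) streamState {
+         switch s {
+         case stateOpen:
+             return stateHalfClosedRemote
+         case stateHalfClosedLocal:
+             return stateClosed
+         default:
+             return s
+         }
+     }
+
+     // onRstStream applies whether RST_STREAM is sent or received.
+     func onRstStream(streamState) streamState { return stateClosed }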
    + + Streams are identified with an unsigned 31-bit integer. Streams initiated by a client + MUST use odd-numbered stream identifiers; those initiated by the server MUST use + even-numbered stream identifiers. A stream identifier of zero (0x0) is used for + connection control messages; the stream identifier zero cannot be used to establish a + new stream. + + + HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are + responded to with a stream identifier of one (0x1). After the upgrade + completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1 + cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1. + + + The identifier of a newly established stream MUST be numerically greater than all + streams that the initiating endpoint has opened or reserved. This governs streams that + are opened using a HEADERS frame and streams that are reserved using + PUSH_PROMISE. An endpoint that receives an unexpected stream identifier + MUST respond with a connection error of + type PROTOCOL_ERROR. + + + The first use of a new stream identifier implicitly closes all streams in the "idle" + state that might have been initiated by that peer with a lower-valued stream identifier. + For example, if a client sends a HEADERS frame on stream 7 without ever + sending a frame on stream 5, then stream 5 transitions to the "closed" state when the + first frame for stream 7 is sent or received. + + + Stream identifiers cannot be reused. Long-lived connections can result in an endpoint + exhausting the available range of stream identifiers. A client that is unable to + establish a new stream identifier can establish a new connection for new streams. A + server that is unable to establish a new stream identifier can send a + GOAWAY frame so that the client is forced to open a new connection for + new streams. + +
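+
+ A non-normative sketch of identifier allocation under these rules; the next field would start
+ at 1 for a client and 2 for a server, and the type is illustrative only.
+
+     package h2sketch
+
+     import "errors"
+
+     type idAllocator struct {
+         next uint32 // 1 for clients (odd identifiers), 2 for servers (even)
+     }
+
+     // nextStreamID returns an identifier numerically greater than any this
+     // endpoint has opened or reserved, or an error once the 31-bit space is
+     // exhausted (at which point a new connection or a GOAWAY is needed).
+     func (a *idAllocator) nextStreamID() (uint32, error) {
+         if a.next > 0x7fffffff {
+             return 0, errors.New("stream identifiers exhausted")
+         }
+         id := a.next
+         a.next += 2
+         return id, nil
+     }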
    + +
    + + A peer can limit the number of concurrently active streams using the + SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent + streams setting is specific to each endpoint and applies only to the peer that receives + the setting. That is, clients specify the maximum number of concurrent streams the + server can initiate, and servers specify the maximum number of concurrent streams the + client can initiate. + + + Streams that are in the "open" state, or either of the "half closed" states count toward + the maximum number of streams that an endpoint is permitted to open. Streams in any of + these three states count toward the limit advertised in the + SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the + "reserved" states do not count toward the stream limit. + + + Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a + HEADERS frame that causes their advertised concurrent stream limit to be + exceeded MUST treat this as a stream error. An + endpoint that wishes to reduce the value of + SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current + number of open streams can either close streams that exceed the new value or allow + streams to complete. + +
    +
    + +
    + + Using streams for multiplexing introduces contention over use of the TCP connection, + resulting in blocked streams. A flow control scheme ensures that streams on the same + connection do not destructively interfere with each other. Flow control is used for both + individual streams and for the connection as a whole. + + + HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame. + + +
    + + HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be + used without requiring protocol changes. Flow control in HTTP/2 has the following + characteristics: + + + Flow control is specific to a connection; i.e., it is "hop-by-hop", not + "end-to-end". + + + Flow control is based on window update frames. Receivers advertise how many octets + they are prepared to receive on a stream and for the entire connection. This is a + credit-based scheme. + + + Flow control is directional with overall control provided by the receiver. A + receiver MAY choose to set any window size that it desires for each stream and for + the entire connection. A sender MUST respect flow control limits imposed by a + receiver. Clients, servers and intermediaries all independently advertise their + flow control window as a receiver and abide by the flow control limits set by + their peer when sending. + + + The initial value for the flow control window is 65,535 octets for both new streams + and the overall connection. + + + The frame type determines whether flow control applies to a frame. Of the frames + specified in this document, only DATA frames are subject to flow + control; all other frame types do not consume space in the advertised flow control + window. This ensures that important control frames are not blocked by flow control. + + + Flow control cannot be disabled. + + + HTTP/2 defines only the format and semantics of the WINDOW_UPDATE + frame (). This document does not stipulate how a + receiver decides when to send this frame or the value that it sends, nor does it + specify how a sender chooses to send packets. Implementations are able to select + any algorithm that suits their needs. + + + + + Implementations are also responsible for managing how requests and responses are sent + based on priority; choosing how to avoid head of line blocking for requests; and + managing the creation of new streams. Algorithm choices for these could interact with + any flow control algorithm. + +
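+
+ A non-normative sketch of the sender-side bookkeeping these principles imply, assuming the
+ initial 65,535-octet stream and connection windows; the type and method names are illustrative.
+
+     package h2sketch
+
+     type sendWindows struct {
+         conn   int64
+         stream int64
+     }
+
+     func newSendWindows() sendWindows { return sendWindows{conn: 65535, stream: 65535} }
+
+     // canSend reports whether a DATA frame with n payload octets (including any
+     // padding) currently fits in both windows; a real sender would queue the
+     // frame and wait for WINDOW_UPDATE rather than fail.
+     func (w *sendWindows) canSend(n int64) bool {
+         return n <= w.conn && n <= w.stream
+     }
+
+     // consume reduces both windows after such a frame has been sent; only DATA
+     // frames are flow controlled.
+     func (w *sendWindows) consume(n int64) {
+         w.conn -= n
+         w.stream -= n
+     }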
    + +
+ + Flow control is defined to protect endpoints that are operating under resource + constraints. For example, a proxy needs to share memory between many connections, and + also might have a slow upstream connection and a fast downstream one. Flow control + addresses cases where the receiver is unable to process data on one stream, yet wants to + continue to process other streams in the same connection. + + + Deployments that do not require this capability can advertise a flow control window of + the maximum size, incrementing the available space when new data is received. This + effectively disables flow control for that receiver. Conversely, a sender is always + subject to the flow control window advertised by the receiver. + + + Deployments with constrained resources (for example, memory) can employ flow control to + limit the amount of memory a peer can consume. Note, however, that this can lead to + suboptimal use of available network resources if flow control is enabled without + knowledge of the bandwidth-delay product (see ). + + + Even with full awareness of the current bandwidth-delay product, implementation of flow + control can be difficult. When using flow control, the receiver MUST read from the TCP + receive buffer in a timely fashion. Failure to do so could lead to a deadlock when + critical frames, such as WINDOW_UPDATE, are not read and acted upon. +
    +
    + +
    + + A client can assign a priority for a new stream by including prioritization information in + the HEADERS frame that opens the stream. For an existing + stream, the PRIORITY frame can be used to change the + priority. + + + The purpose of prioritization is to allow an endpoint to express how it would prefer its + peer allocate resources when managing concurrent streams. Most importantly, priority can + be used to select streams for transmitting frames when there is limited capacity for + sending. + + + Streams can be prioritized by marking them as dependent on the completion of other streams + (). Each dependency is assigned a relative weight, a number + that is used to determine the relative proportion of available resources that are assigned + to streams dependent on the same stream. + + + + Explicitly setting the priority for a stream is input to a prioritization process. It + does not guarantee any particular processing or transmission order for the stream relative + to any other stream. An endpoint cannot force a peer to process concurrent streams in a + particular order using priority. Expressing priority is therefore only ever a suggestion. + + + Providing prioritization information is optional, so default values are used if no + explicit indicator is provided (). + + +
    + + Each stream can be given an explicit dependency on another stream. Including a + dependency expresses a preference to allocate resources to the identified stream rather + than to the dependent stream. + + + A stream that is not dependent on any other stream is given a stream dependency of 0x0. + In other words, the non-existent stream 0 forms the root of the tree. + + + A stream that depends on another stream is a dependent stream. The stream upon which a + stream is dependent is a parent stream. A dependency on a stream that is not currently + in the tree - such as a stream in the "idle" state - results in that stream being given + a default priority. + + + When assigning a dependency on another stream, the stream is added as a new dependency + of the parent stream. Dependent streams that share the same parent are not ordered with + respect to each other. For example, if streams B and C are dependent on stream A, and + if stream D is created with a dependency on stream A, this results in a dependency order + of A followed by B, C, and D in any order. + +
+    A                 A
+   / \      ==>      /|\
+  B   C              B D C
+]]>
    + + An exclusive flag allows for the insertion of a new level of dependencies. The + exclusive flag causes the stream to become the sole dependency of its parent stream, + causing other dependencies to become dependent on the exclusive stream. In the + previous example, if stream D is created with an exclusive dependency on stream A, this + results in D becoming the dependency parent of B and C. + +
+                      A
+    A                 |
+   / \      ==>       D
+  B   C              / \
+                    B   C
+]]>
    + + Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all + of the streams that it depends on (the chain of parent streams up to 0x0) are either + closed, or it is not possible to make progress on them. + + + A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR. + +
    + +
    + + All dependent streams are allocated an integer weight between 1 and 256 (inclusive). + + + Streams with the same parent SHOULD be allocated resources proportionally based on their + weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A + with weight 12, and if no progress can be made on A, stream B ideally receives one third + of the resources allocated to stream C. + +
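+
+ A non-normative sketch of dividing capacity among streams that share a parent, using the
+ weights from the example above; the shares function is illustrative.
+
+     package main
+
+     import "fmt"
+
+     // shares splits capacity among sibling streams in proportion to weight.
+     func shares(weights map[string]int, capacity float64) map[string]float64 {
+         total := 0
+         for _, w := range weights {
+             total += w
+         }
+         out := make(map[string]float64, len(weights))
+         for id, w := range weights {
+             out[id] = capacity * float64(w) / float64(total)
+         }
+         return out
+     }
+
+     func main() {
+         // B (weight 4) gets 1/4 and C (weight 12) gets 3/4, so B receives one
+         // third of what C receives.
+         fmt.Println(shares(map[string]int{"B": 4, "C": 12}, 1.0))
+     }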
    + +
    + + Stream priorities are changed using the PRIORITY frame. Setting a + dependency causes a stream to become dependent on the identified parent stream. + + + Dependent streams move with their parent stream if the parent is reprioritized. Setting + a dependency with the exclusive flag for a reprioritized stream moves all the + dependencies of the new parent stream to become dependent on the reprioritized stream. + + + If a stream is made dependent on one of its own dependencies, the formerly dependent + stream is first moved to be dependent on the reprioritized stream's previous parent. + The moved dependency retains its weight. + +
+ + For example, consider an original dependency tree where B and C depend on A, D and E + depend on C, and F depends on D. If A is made dependent on D, then D takes the place + of A. All other dependency relationships stay the same, except for F, which becomes + dependent on A if the reprioritization is exclusive. +
+    A              D   A              D                 D
+   / \            /   / \            / \               |
+  B   C   ==>    F   B   C   ==>    F   A      OR      A
+     / \                 |             / \            /|\
+    D   E                E            B   C          B C F
+    |                                     |            |
+    F                                     E            E
+              (intermediate)    (non-exclusive)   (exclusive)
+]]>
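+
+ A non-normative Go sketch of the non-exclusive move described above; the tree representation is
+ illustrative, weights are omitted (the moved dependency would retain its weight), and both
+ streams are assumed to have a parent node (ultimately the node standing in for stream 0x0).
+
+     package h2sketch
+
+     type stream struct {
+         parent   *stream
+         children []*stream
+     }
+
+     func (s *stream) removeChild(c *stream) {
+         for i, ch := range s.children {
+             if ch == c {
+                 s.children = append(s.children[:i], s.children[i+1:]...)
+                 return
+             }
+         }
+     }
+
+     func (s *stream) isDescendantOf(a *stream) bool {
+         for p := s.parent; p != nil; p = p.parent {
+             if p == a {
+                 return true
+             }
+         }
+         return false
+     }
+
+     // setDependency makes a depend on newParent.  If newParent is currently in
+     // a's subtree, it is first moved to depend on a's former parent.
+     func setDependency(a, newParent *stream) {
+         if newParent.isDescendantOf(a) {
+             newParent.parent.removeChild(newParent)
+             newParent.parent = a.parent
+             a.parent.children = append(a.parent.children, newParent)
+         }
+         a.parent.removeChild(a)
+         a.parent = newParent
+         newParent.children = append(newParent.children, a)
+     }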
    +
    + +
    + + When a stream is removed from the dependency tree, its dependencies can be moved to + become dependent on the parent of the closed stream. The weights of new dependencies + are recalculated by distributing the weight of the dependency of the closed stream + proportionally based on the weights of its dependencies. + + + Streams that are removed from the dependency tree cause some prioritization information + to be lost. Resources are shared between streams with the same parent stream, which + means that if a stream in that set closes or becomes blocked, any spare capacity + allocated to a stream is distributed to the immediate neighbors of the stream. However, + if the common dependency is removed from the tree, those streams share resources with + streams at the next highest level. + + + For example, assume streams A and B share a parent, and streams C and D both depend on + stream A. Prior to the removal of stream A, if streams A and D are unable to proceed, + then stream C receives all the resources dedicated to stream A. If stream A is removed + from the tree, the weight of stream A is divided between streams C and D. If stream D + is still unable to proceed, this results in stream C receiving a reduced proportion of + resources. For equal starting weights, C receives one third, rather than one half, of + available resources. + + + It is possible for a stream to become closed while prioritization information that + creates a dependency on that stream is in transit. If a stream identified in a + dependency has no associated priority information, then the dependent stream is instead + assigned a default priority. This potentially creates + suboptimal prioritization, since the stream could be given a priority that is different + to what is intended. + + + To avoid these problems, an endpoint SHOULD retain stream prioritization state for a + period after streams become closed. The longer state is retained, the lower the chance + that streams are assigned incorrect or default priority values. + + + This could create a large state burden for an endpoint, so this state MAY be limited. + An endpoint MAY apply a fixed upper limit on the number of closed streams for which + prioritization state is tracked to limit state exposure. The amount of additional state + an endpoint maintains could be dependent on load; under high load, prioritization state + can be discarded to limit resource commitments. In extreme cases, an endpoint could + even discard prioritization state for active or reserved streams. If a fixed limit is + applied, endpoints SHOULD maintain state for at least as many streams as allowed by + their setting for SETTINGS_MAX_CONCURRENT_STREAMS. + + + An endpoint receiving a PRIORITY frame that changes the priority of a + closed stream SHOULD alter the dependencies of the streams that depend on it, if it has + retained enough state to do so. + +
    + +
    + + Providing priority information is optional. Streams are assigned a non-exclusive + dependency on stream 0x0 by default. Pushed streams + initially depend on their associated stream. In both cases, streams are assigned a + default weight of 16. + +
    +
    + +
    + + HTTP/2 framing permits two classes of error: + + + An error condition that renders the entire connection unusable is a connection error. + + + An error in an individual stream is a stream error. + + + + + A list of error codes is included in . + + +
    + + A connection error is any error which prevents further processing of the framing layer, + or which corrupts any connection state. + + + An endpoint that encounters a connection error SHOULD first send a GOAWAY + frame () with the stream identifier of the last stream that it + successfully received from its peer. The GOAWAY frame includes an error + code that indicates why the connection is terminating. After sending the + GOAWAY frame, the endpoint MUST close the TCP connection. + + + It is possible that the GOAWAY will not be reliably received by the + receiving endpoint (see ). In the event of a connection error, + GOAWAY only provides a best effort attempt to communicate with the peer + about why the connection is being terminated. + + + An endpoint can end a connection at any time. In particular, an endpoint MAY choose to + treat a stream error as a connection error. Endpoints SHOULD send a + GOAWAY frame when ending a connection, providing that circumstances + permit it. + +
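+
+ A non-normative sketch of this sequence; onConnectionError and writeGoAway are illustrative
+ helpers, not a real API, and the GOAWAY payload here carries only the last stream identifier
+ and the error code, with no debug data.
+
+     package h2sketch
+
+     import "net"
+
+     func onConnectionError(conn net.Conn, lastStreamID, code uint32) error {
+         _ = writeGoAway(conn, lastStreamID, code) // best effort; the peer may never see it
+         return conn.Close()
+     }
+
+     func writeGoAway(conn net.Conn, lastStreamID, code uint32) error {
+         // 9-octet header: length=8, type=0x7 (GOAWAY), flags=0, stream id=0.
+         header := []byte{0x00, 0x00, 0x08, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00}
+         payload := []byte{
+             byte(lastStreamID>>24) & 0x7f, byte(lastStreamID >> 16), byte(lastStreamID >> 8), byte(lastStreamID),
+             byte(code >> 24), byte(code >> 16), byte(code >> 8), byte(code),
+         }
+         if _, err := conn.Write(header); err != nil {
+             return err
+         }
+         _, err := conn.Write(payload)
+         return err
+     }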
    + +
    + + A stream error is an error related to a specific stream that does not affect processing + of other streams. + + + An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error + occurred. The RST_STREAM frame includes an error code that indicates the + type of error. + + + A RST_STREAM is the last frame that an endpoint can send on a stream. + The peer that sends the RST_STREAM frame MUST be prepared to receive any + frames that were sent or enqueued for sending by the remote peer. These frames can be + ignored, except where they modify connection state (such as the state maintained for + header compression, or flow control). + + + Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for + any stream. However, an endpoint MAY send additional RST_STREAM frames if + it receives frames on a closed stream after more than a round-trip time. This behavior + is permitted to deal with misbehaving implementations. + + + An endpoint MUST NOT send a RST_STREAM in response to an + RST_STREAM frame, to avoid looping. + +
    + +
    + + If the TCP connection is closed or reset while streams remain in open or half closed + states, then the endpoint MUST assume that those streams were abnormally interrupted and + could be incomplete. + +
    +
    + +
    + + HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide + additional services or alter any aspect of the protocol, within the limitations described + in this section. Extensions are effective only within the scope of a single HTTP/2 + connection. + + + Extensions are permitted to use new frame types, new + settings, or new error + codes. Registries are established for managing these extension points: frame types, settings and + error codes. + + + Implementations MUST ignore unknown or unsupported values in all extensible protocol + elements. Implementations MUST discard frames that have unknown or unsupported types. + This means that any of these extension points can be safely used by extensions without + prior arrangement or negotiation. However, extension frames that appear in the middle of + a header block are not permitted; these MUST be treated + as a connection error of type + PROTOCOL_ERROR. + + + However, extensions that could change the semantics of existing protocol components MUST + be negotiated before being used. For example, an extension that changes the layout of the + HEADERS frame cannot be used until the peer has given a positive signal + that this is acceptable. In this case, it could also be necessary to coordinate when the + revised layout comes into effect. Note that treating any frame other than + DATA frames as flow controlled is such a change in semantics, and can only + be done through negotiation. + + + This document doesn't mandate a specific method for negotiating the use of an extension, + but notes that a setting could be used for that + purpose. If both peers set a value that indicates willingness to use the extension, then + the extension can be used. If a setting is used for extension negotiation, the initial + value MUST be defined so that the extension is initially disabled. + +
    +
    + +
+ + This specification defines a number of frame types, each identified by a unique 8-bit type + code. Each frame type serves a distinct purpose either in the establishment and management + of the connection as a whole, or of individual streams. + + + The transmission of specific frame types can alter the state of a connection. If endpoints + fail to maintain a synchronized view of the connection state, successful communication + within the connection will no longer be possible. Therefore, it is important that endpoints + have a shared comprehension of how the state is affected by the use of any given frame. + +
    + + DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated + with a stream. One or more DATA frames are used, for instance, to carry HTTP request or + response payloads. + + + DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to + obscure the size of messages. + +
    + +
    + + The DATA frame contains the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is optional and is only present if the PADDED flag is set. + + + Application data. The amount of data is the remainder of the frame payload after + subtracting the length of the other fields that are present. + + + Padding octets that contain no application semantic value. Padding octets MUST be set + to zero when sending and ignored when receiving. + + + + + + The DATA frame defines the following flags: + + + Bit 1 being set indicates that this frame is the last that the endpoint will send for + the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state. + + + Bit 4 being set indicates that the Pad Length field and any padding that it describes + is present. + + + + + DATA frames MUST be associated with a stream. If a DATA frame is received whose stream + identifier field is 0x0, the recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + DATA frames are subject to flow control and can only be sent when a stream is in the + "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow + control, including Pad Length and Padding fields if present. If a DATA frame is received + whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond + with a stream error of type + STREAM_CLOSED. + + + The total number of padding octets is determined by the value of the Pad Length field. If + the length of the padding is greater than the length of the frame payload, the recipient + MUST treat this as a connection error of + type PROTOCOL_ERROR. + + + A frame can be increased in size by one octet by including a Pad Length field with a + value of zero. + + + + + Padding is a security feature; see . + +
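+
+ A non-normative sketch of stripping padding from a DATA frame payload under the rules above;
+ the flag constant matches the PADDED bit described for this frame.
+
+     package h2sketch
+
+     import "errors"
+
+     const flagPadded = 0x8
+
+     // dataPayload returns the application data, removing the Pad Length octet
+     // and trailing padding when the PADDED flag is set.
+     func dataPayload(flags uint8, payload []byte) ([]byte, error) {
+         if flags&flagPadded == 0 {
+             return payload, nil
+         }
+         if len(payload) < 1 {
+             return nil, errors.New("PROTOCOL_ERROR: missing Pad Length field")
+         }
+         padLen := int(payload[0])
+         body := payload[1:]
+         if padLen > len(body) {
+             return nil, errors.New("PROTOCOL_ERROR: padding exceeds frame payload")
+         }
+         return body[:len(body)-padLen], nil
+     }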
    + +
    + + The HEADERS frame (type=0x1) is used to open a stream, + and additionally carries a header block fragment. HEADERS frames can be sent on a stream + in the "open" or "half closed (remote)" states. + +
    + +
+ + The HEADERS frame payload has the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is only present if the PADDED flag is set. + + + A single bit flag indicates that the stream dependency is exclusive, see . This field is only present if the PRIORITY flag is set. + + + A 31-bit stream identifier for the stream that this stream depends on, see . This field is only present if the PRIORITY flag is set. + + + An 8-bit weight for the stream, see . Add one to the + value to obtain a weight between 1 and 256. This field is only present if the + PRIORITY flag is set. + + + A header block fragment. + + + Padding octets that contain no application semantic value. Padding octets MUST be set + to zero when sending and ignored when receiving. + + + + + + The HEADERS frame defines the following flags: + + + + Bit 1 being set indicates that the header block is + the last that the endpoint will send for the identified stream. Setting this flag + causes the stream to enter one of the "half closed" + states. + + + A HEADERS frame carries the END_STREAM flag that signals the end of a stream. + However, a HEADERS frame with the END_STREAM flag set can be followed by + CONTINUATION frames on the same stream. Logically, the + CONTINUATION frames are part of the HEADERS frame. + + + + + Bit 3 being set indicates that this frame contains an entire header block and is not followed by any + CONTINUATION frames. + + + A HEADERS frame without the END_HEADERS flag set MUST be followed by a + CONTINUATION frame for the same stream. A receiver MUST treat the + receipt of any other type of frame or a frame on a different stream as a connection error of type + PROTOCOL_ERROR. + + + + + Bit 4 being set indicates that the Pad Length field and any padding that it + describes is present. + + + + + Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight + fields are present; see . + + + + + + + The payload of a HEADERS frame contains a header block + fragment. A header block that does not fit within a HEADERS frame is continued in + a CONTINUATION frame. + + + + HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose + stream identifier field is 0x0, the recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + + The HEADERS frame changes the connection state as described in . + + + + The HEADERS frame includes optional padding. Padding fields and flags are identical to + those defined for DATA frames. + + + Prioritization information in a HEADERS frame is logically equivalent to a separate + PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in + stream prioritization when new streams are created. Prioritization fields in HEADERS frames + subsequent to the first on a stream reprioritize the + stream. +
    + +
    + + The PRIORITY frame (type=0x2) specifies the sender-advised + priority of a stream. It can be sent at any time for an existing stream, including + closed streams. This enables reprioritization of existing streams. + +
    + +
    + + The payload of a PRIORITY frame contains the following fields: + + + A single bit flag indicates that the stream dependency is exclusive, see . + + + A 31-bit stream identifier for the stream that this stream depends on, see . + + + An 8-bit weight for the identified stream dependency, see . Add one to the value to obtain a weight between 1 and 256. + + + + + + The PRIORITY frame does not define any flags. + + + + The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received + with a stream identifier of 0x0, the recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open", + "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be + sent between consecutive frames that comprise a single header + block. Note that this frame could arrive after processing or frame sending has + completed, which would cause it to have no effect on the current stream. For a stream + that is in the "half closed (remote)" or "closed" - state, this frame can only affect + processing of the current stream and not frame transmission. + + + The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state. + This allows for the reprioritization of a group of dependent streams by altering the + priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a + closed stream risks being ignored due to the peer having discarded priority state + information for that stream. + +
    + +
    + + The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by + the initiator of a stream, it indicates that they wish to cancel the stream or that an + error condition has occurred. When sent by the receiver of a stream, it indicates that + either the receiver is rejecting the stream, requesting that the stream be cancelled, or + that an error condition has occurred. + +
    + +
    + + + The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being + terminated. + + + + The RST_STREAM frame does not define any flags. + + + + The RST_STREAM frame fully terminates the referenced stream and causes it to enter the + closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send + additional frames for that stream, with the exception of PRIORITY. However, + after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process + additional frames sent on the stream that might have been sent by the peer prior to the + arrival of the RST_STREAM. + + + + RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received + with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + + + RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM + frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + +
    + +
    + + The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints + communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is + also used to acknowledge the receipt of those parameters. Individually, a SETTINGS + parameter can also be referred to as a "setting". + + + SETTINGS parameters are not negotiated; they describe characteristics of the sending peer, + which are used by the receiving peer. Different values for the same parameter can be + advertised by each peer. For example, a client might set a high initial flow control + window, whereas a server might set a lower value to conserve resources. + + + + A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be + sent at any other time by either endpoint over the lifetime of the connection. + Implementations MUST support all of the parameters defined by this specification. + + + + Each parameter in a SETTINGS frame replaces any existing value for that parameter. + Parameters are processed in the order in which they appear, and a receiver of a SETTINGS + frame does not need to maintain any state other than the current value of its + parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by + a receiver. + + + SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS + frame defines the following flag: + + + Bit 1 being set indicates that this frame acknowledges receipt and application of the + peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST + be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value + other than 0 MUST be treated as a connection + error of type FRAME_SIZE_ERROR. For more info, see Settings Synchronization. + + + + + SETTINGS frames always apply to a connection, never a single stream. The stream + identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS + frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond + with a connection error of type + PROTOCOL_ERROR. + + + The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame + MUST be treated as a connection error of type + PROTOCOL_ERROR. + + +
    + + The payload of a SETTINGS frame consists of zero or more parameters, each consisting of + an unsigned 16-bit setting identifier and an unsigned 32-bit value. + + +
    + +
    +
    + +
+ + The following parameters are defined: + + + + Allows the sender to inform the remote endpoint of the maximum size of the header + compression table used to decode header blocks, in octets. The encoder can select + any size equal to or less than this value by using signaling specific to the + header compression format inside a header block. The initial value is 4,096 + octets. + + + + + This setting can be used to disable server + push. An endpoint MUST NOT send a PUSH_PROMISE frame if it + receives this parameter set to a value of 0. An endpoint that has both set this + parameter to 0 and had it acknowledged MUST treat the receipt of a + PUSH_PROMISE frame as a connection error of type + PROTOCOL_ERROR. + + + The initial value is 1, which indicates that server push is permitted. Any value + other than 0 or 1 MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + Indicates the maximum number of concurrent streams that the sender will allow. + This limit is directional: it applies to the number of streams that the sender + permits the receiver to create. Initially there is no limit to this value. It is + recommended that this value be no smaller than 100, so as to not unnecessarily + limit parallelism. + + + A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special + by endpoints. A zero value does prevent the creation of new streams; however, this + can also happen for any limit that is exhausted with active streams. Servers + SHOULD only set a zero value for short durations; if a server does not wish to + accept requests, closing the connection could be preferable. + + + + + Indicates the sender's initial window size (in octets) for stream level flow + control. The initial value is 2^16-1 (65,535) octets. + + + This setting affects the window size of all streams, including existing streams, + see . + + + Values above the maximum flow control window size of 2^31-1 MUST + be treated as a connection error of + type FLOW_CONTROL_ERROR. + + + + + Indicates the size of the largest frame payload that the sender is willing to + receive, in octets. + + + The initial value is 2^14 (16,384) octets. The value advertised by + an endpoint MUST be between this initial value and the maximum allowed frame size + (2^24-1 or 16,777,215 octets), inclusive. Values outside this range + MUST be treated as a connection error + of type PROTOCOL_ERROR. + + + + + This advisory setting informs a peer of the maximum size of header list that the + sender is prepared to accept, in octets. The value is based on the uncompressed + size of header fields, including the length of the name and value in octets plus + an overhead of 32 octets for each header field. + + + For any given request, a lower limit than what is advertised MAY be enforced. The + initial value of this setting is unlimited. + + + + + + An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier + MUST ignore that setting. +
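+
+ A non-normative sketch of decoding a SETTINGS payload as a sequence of 16-bit identifier /
+ 32-bit value pairs, ignoring unknown identifiers; the numeric identifier constants are the
+ values registered for the parameters above, and the apply callback is illustrative.
+
+     package h2sketch
+
+     import (
+         "encoding/binary"
+         "errors"
+     )
+
+     const (
+         settingsHeaderTableSize      = 0x1
+         settingsEnablePush           = 0x2
+         settingsMaxConcurrentStreams = 0x3
+         settingsInitialWindowSize    = 0x4
+         settingsMaxFrameSize         = 0x5
+         settingsMaxHeaderListSize    = 0x6
+     )
+
+     func parseSettings(payload []byte, apply func(id uint16, val uint32) error) error {
+         if len(payload)%6 != 0 {
+             return errors.New("malformed SETTINGS payload")
+         }
+         // Parameters are processed in the order they appear; later values for
+         // the same identifier replace earlier ones.
+         for i := 0; i < len(payload); i += 6 {
+             id := binary.BigEndian.Uint16(payload[i:])
+             val := binary.BigEndian.Uint32(payload[i+2:])
+             if err := apply(id, val); err != nil {
+                 return err
+             }
+         }
+         return nil
+     }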
    + +
    + + Most values in SETTINGS benefit from or require an understanding of when the peer has + received and applied the changed parameter values. In order to provide + such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag + is not set MUST apply the updated parameters as soon as possible upon receipt. + + + The values in the SETTINGS frame MUST be processed in the order they appear, with no + other frame processing between values. Unsupported parameters MUST be ignored. Once + all values have been processed, the recipient MUST immediately emit a SETTINGS frame + with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender + of the altered parameters can rely on the setting having been applied. + + + If the sender of a SETTINGS frame does not receive an acknowledgement within a + reasonable amount of time, it MAY issue a connection error of type + SETTINGS_TIMEOUT. + +
    +
    + +
    + + The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of + streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned + 31-bit identifier of the stream the endpoint plans to create along with a set of headers + that provide additional context for the stream. contains a + thorough description of the use of PUSH_PROMISE frames. + + +
    + +
    + + The PUSH_PROMISE frame payload has the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is only present if the PADDED flag is set. + + + A single reserved bit. + + + An unsigned 31-bit integer that identifies the stream that is reserved by the + PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next + stream sent by the sender (see new stream + identifier). + + + A header block fragment containing request header + fields. + + + Padding octets. + + + + + + The PUSH_PROMISE frame defines the following flags: + + + + Bit 3 being set indicates that this frame contains an entire header block and is not followed by any + CONTINUATION frames. + + + A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a + CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any + other type of frame or a frame on a different stream as a connection error of type + PROTOCOL_ERROR. + + + + + Bit 4 being set indicates that the Pad Length field and any padding that it + describes is present. + + + + + + + PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream + identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the + stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + + Promised streams are not required to be used in the order they are promised. The + PUSH_PROMISE only reserves stream identifiers for later use. + + + + PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the + peer endpoint is set to 0. An endpoint that has set this setting and has received + acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type + PROTOCOL_ERROR. + + + Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a + RST_STREAM referencing the promised stream identifier back to the sender of + the PUSH_PROMISE. + + + + A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for + header compression. PUSH_PROMISE also reserves a stream for later use, causing the + promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a + stream unless that stream is either "open" or "half closed (remote)"; the sender MUST + ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST + be in the "idle" state). + + + Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream + state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a + stream that is neither "open" nor "half closed (local)" as a connection error of type + PROTOCOL_ERROR. However, an endpoint that has sent + RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that + might have been created before the RST_STREAM frame is received and + processed. + + + A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a + stream that is not currently in the "idle" state) as a connection error of type + PROTOCOL_ERROR. + + + + The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical + to those defined for DATA frames. + +
    + +
    + + The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the + sender, as well as determining whether an idle connection is still functional. PING + frames can be sent from any endpoint. + +
    + +
    + + + In addition to the frame header, PING frames MUST contain 8 octets of data in the payload. + A sender can include any value it chooses and use those bytes in any fashion. + + + Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with + the ACK flag set in response, with an identical payload. PING responses SHOULD be given + higher priority than any other frame. + + + + The PING frame defines the following flags: + + + Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST + set this flag in PING responses. An endpoint MUST NOT respond to PING frames + containing this flag. + + + + + PING frames are not associated with any individual stream. If a PING frame is received + with a stream identifier field value other than 0x0, the recipient MUST respond with a + connection error of type + PROTOCOL_ERROR. + + + Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type + FRAME_SIZE_ERROR. + + +
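+
+ A non-normative sketch of constructing the required reply to a PING; the constants mirror the
+ frame type and ACK flag described above.
+
+     package h2sketch
+
+     const (
+         frameTypePing = 0x6
+         flagAck       = 0x1
+     )
+
+     // pingResponse returns the 9-octet header plus payload of the reply, or nil
+     // when the received PING already carries the ACK flag and must not be
+     // answered.
+     func pingResponse(flags uint8, payload [8]byte) []byte {
+         if flags&flagAck != 0 {
+             return nil
+         }
+         resp := []byte{0x00, 0x00, 0x08, frameTypePing, flagAck, 0x00, 0x00, 0x00, 0x00}
+         return append(resp, payload[:]...)
+     }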
    + +
+ + The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this + connection. GOAWAY can be sent by either the client or the server. Once sent, the sender + will ignore frames sent on any new streams with identifiers higher than the included last + stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the + connection, although a new connection can be established for new streams. + + + The purpose of this frame is to allow an endpoint to gracefully stop accepting new + streams, while still finishing processing of previously established streams. This enables + administrative actions, like server maintenance. + + + There is an inherent race condition between an endpoint starting new streams and the + remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream + identifier of the last peer-initiated stream which was or might be processed on the + sending endpoint in this connection. For instance, if the server sends a GOAWAY frame, + the identified stream is the highest numbered stream initiated by the client. + + + If the receiver of the GOAWAY has sent data on streams with a higher stream identifier + than what is indicated in the GOAWAY frame, those streams are not or will not be + processed. The receiver of the GOAWAY frame can treat the streams as though they had + never been created at all, thereby allowing those streams to be retried later on a new + connection. + + + Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote + can know whether a stream has been partially processed or not. For example, if an HTTP + client sends a POST at the same time that a server closes a connection, the client cannot + know if the server started to process that POST request if the server does not send a + GOAWAY frame to indicate what streams it might have acted on. + + + An endpoint might choose to close a connection without sending GOAWAY for misbehaving + peers. + +
    + +
+ + The GOAWAY frame does not define any flags. + + + The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat + a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type + PROTOCOL_ERROR. + + + The last stream identifier in the GOAWAY frame contains the highest numbered stream + identifier on which the sender of the GOAWAY frame might have taken some action, or + might yet take action. All streams up to and including the identified stream might + have been processed in some way. The last stream identifier can be set to 0 if no streams + were processed. + + + In this context, "processed" means that some data from the stream was passed to some + higher layer of software that might have taken some action as a result. + + + If a connection terminates without a GOAWAY frame, the last stream identifier is + effectively the highest possible stream identifier. + + + On streams with lower or equal numbered identifiers that were not closed completely prior + to the connection being closed, re-attempting requests, transactions, or any protocol + activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or + DELETE. Any protocol activity that uses higher numbered streams can be safely retried + using a new connection. + + + Activity on streams numbered lower or equal to the last stream identifier might still + complete successfully. The sender of a GOAWAY frame might gracefully shut down a + connection by sending a GOAWAY frame, maintaining the connection in an open state until + all in-progress streams complete. + + + An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an + endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could + subsequently encounter a condition that requires immediate termination of the connection. + The last stream identifier from the last GOAWAY frame received indicates which streams + could have been acted upon. Endpoints MUST NOT increase the value they send in the last + stream identifier, since the peers might already have retried unprocessed requests on + another connection. + + + A client that is unable to retry requests loses all requests that are in flight when the + server closes the connection. This is especially true for intermediaries that might + not be serving clients using HTTP/2. A server that is attempting to gracefully shut down + a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to + 2^31-1 and a NO_ERROR code. This signals to the client that + a shutdown is imminent and that no further requests can be initiated. After waiting at + least one round trip time, the server can send another GOAWAY frame with an updated last + stream identifier. This ensures that a connection can be cleanly shut down without losing + requests. + + + + After sending a GOAWAY frame, the sender can discard frames for streams with identifiers + higher than the identified last stream. However, any frames that alter connection state + cannot be completely ignored. For instance, HEADERS, + PUSH_PROMISE and CONTINUATION frames MUST be minimally + processed to ensure the state maintained for header compression is consistent (see ); similarly DATA frames MUST be counted toward the connection flow + control window. Failure to process these frames can cause flow control or header + compression state to become unsynchronized.
+ + + + The GOAWAY frame also contains a 32-bit error code that + contains the reason for closing the connection. + + + Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug + data is intended for diagnostic purposes only and carries no semantic value. Debug + information could contain security- or privacy-sensitive data. Logged or otherwise + persistently stored debug data MUST have adequate safeguards to prevent unauthorized + access. + +
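+
+ As a non-normative illustration of the two-step graceful shutdown described above, the
+ following Go sketch first announces an imminent shutdown with a last stream identifier of
+ 2^31-1 and NO_ERROR, waits at least one round trip, and then commits to the real last
+ processed stream. The framer interface and its WriteGoAway method are invented for this
+ example and are not part of this specification.
+
+ package h2sketch
+
+ import "time"
+
+ // Hypothetical frame writer; a real implementation would serialize a
+ // GOAWAY frame (type=0x7) on stream 0 with the given fields.
+ type framer interface {
+ 	WriteGoAway(lastStreamID uint32, errCode uint32, debug []byte) error
+ }
+
+ const noError = 0x0
+ const maxStreamID = 1<<31 - 1 // 2^31-1, the highest possible stream identifier
+
+ // gracefulShutdown follows the two-step GOAWAY pattern described above.
+ func gracefulShutdown(fr framer, rtt time.Duration, lastProcessed uint32) error {
+ 	// Step 1: signal imminent shutdown without rejecting in-flight requests.
+ 	if err := fr.WriteGoAway(maxStreamID, noError, nil); err != nil {
+ 		return err
+ 	}
+ 	// Step 2: after at least one round-trip time, commit to the highest
+ 	// stream identifier that was (or might be) processed.
+ 	time.Sleep(rtt)
+ 	return fr.WriteGoAway(lastProcessed, noError, nil)
+ }
+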
    + +
    + + The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see for an overview. + + + Flow control operates at two levels: on each individual stream and on the entire + connection. + + + Both types of flow control are hop-by-hop; that is, only between the two endpoints. + Intermediaries do not forward WINDOW_UPDATE frames between dependent connections. + However, throttling of data transfer by any receiver can indirectly cause the propagation + of flow control information toward the original sender. + + + Flow control only applies to frames that are identified as being subject to flow control. + Of the frame types defined in this document, this includes only DATA frames. + Frames that are exempt from flow control MUST be accepted and processed, unless the + receiver is unable to assign resources to handling the frame. A receiver MAY respond with + a stream error or connection error of type + FLOW_CONTROL_ERROR if it is unable to accept a frame. + +
    + +
+ + The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer + indicating the number of octets that the sender can transmit in addition to the existing + flow control window. The legal range for the increment to the flow control window is 1 to + 2^31-1 (0x7fffffff) octets. + + + The WINDOW_UPDATE frame does not define any flags. + + + The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the + former case, the frame's stream identifier indicates the affected stream; in the latter, + the value "0" indicates that the entire connection is the subject of the frame. + + + A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window + increment of 0 as a stream error of type + PROTOCOL_ERROR; errors on the connection flow control window MUST be + treated as a connection error. + + + WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag. + This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)" + or "closed" stream. A receiver MUST NOT treat this as an error, see . + + + A receiver that receives a flow controlled frame MUST always account for its contribution + against the connection flow control window, unless the receiver treats this as a connection error. This is necessary even if the + frame is in error. Since the sender counts the frame toward the flow control window, if + the receiver does not, the flow control window at sender and receiver can become + different. + + +
+ + Flow control in HTTP/2 is implemented using a window kept by each sender on every + stream. The flow control window is a simple integer value that indicates how many octets + of data the sender is permitted to transmit; as such, its size is a measure of the + buffering capacity of the receiver. + + + Two flow control windows are applicable: the stream flow control window and the + connection flow control window. The sender MUST NOT send a flow controlled frame with a + length that exceeds the space available in either of the flow control windows advertised + by the receiver. Frames with zero length with the END_STREAM flag set (that is, an + empty DATA frame) MAY be sent if there is no available space in either + flow control window. + + + For flow control calculations, the 9 octet frame header is not counted. + + + After sending a flow controlled frame, the sender reduces the space available in both + windows by the length of the transmitted frame. + + + The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up + space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream + and connection level flow control windows. + + + A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the + amount specified in the frame. + + + A sender MUST NOT allow a flow control window to exceed 2^31-1 octets. + If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this + maximum it MUST terminate either the stream or the connection, as appropriate. For + streams, the sender sends a RST_STREAM with an error code of + FLOW_CONTROL_ERROR; for the connection, a GOAWAY + frame with a FLOW_CONTROL_ERROR code is sent. + + + Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are + completely asynchronous with respect to each other. This property allows a receiver to + aggressively update the window size kept by the sender to prevent streams from stalling. +
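+
+ A non-normative Go sketch of the sender-side bookkeeping described above: the sender
+ deducts each flow controlled frame from both windows before sending, credits a window when
+ a WINDOW_UPDATE arrives, and treats growth beyond 2^31-1 octets as FLOW_CONTROL_ERROR.
+ The sendWindows type and its method names are invented for this illustration.
+
+ package h2sketch
+
+ import "errors"
+
+ const maxWindow = 1<<31 - 1 // 2^31-1 octets
+
+ // sendWindows tracks the space a sender believes it may still use.
+ type sendWindows struct {
+ 	conn   int64
+ 	stream map[uint32]int64
+ }
+
+ // consume is called before sending a flow controlled (DATA) frame of n octets.
+ func (w *sendWindows) consume(streamID uint32, n int64) error {
+ 	if n > w.conn || n > w.stream[streamID] {
+ 		return errors.New("would exceed an advertised flow control window")
+ 	}
+ 	w.conn -= n
+ 	w.stream[streamID] -= n
+ 	return nil
+ }
+
+ // credit applies a WINDOW_UPDATE; streamID 0 updates the connection window.
+ func (w *sendWindows) credit(streamID uint32, increment int64) error {
+ 	if streamID == 0 {
+ 		if w.conn+increment > maxWindow {
+ 			return errors.New("FLOW_CONTROL_ERROR: connection window overflow")
+ 		}
+ 		w.conn += increment
+ 		return nil
+ 	}
+ 	if w.stream[streamID]+increment > maxWindow {
+ 		return errors.New("FLOW_CONTROL_ERROR: stream window overflow")
+ 	}
+ 	w.stream[streamID] += increment
+ 	return nil
+ }
+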
    + +
    + + When an HTTP/2 connection is first established, new streams are created with an initial + flow control window size of 65,535 octets. The connection flow control window is 65,535 + octets. Both endpoints can adjust the initial window size for new streams by including + a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS + frame that forms part of the connection preface. The connection flow control window can + only be changed using WINDOW_UPDATE frames. + + + Prior to receiving a SETTINGS frame that sets a value for + SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default + initial window size when sending flow controlled frames. Similarly, the connection flow + control window is set to the default initial window size until a WINDOW_UPDATE frame is + received. + + + A SETTINGS frame can alter the initial flow control window size for all + current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, + a receiver MUST adjust the size of all stream flow control windows that it maintains by + the difference between the new value and the old value. + + + A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in + a flow control window to become negative. A sender MUST track the negative flow control + window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE + frames that cause the flow control window to become positive. + + + For example, if the client sends 60KB immediately on connection establishment, and the + server sets the initial window size to be 16KB, the client will recalculate the + available flow control window to be -44KB on receipt of the SETTINGS + frame. The client retains a negative flow control window until WINDOW_UPDATE frames + restore the window to being positive, after which the client can resume sending. + + + A SETTINGS frame cannot alter the connection flow control window. + + + An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that + causes any flow control window to exceed the maximum size as a connection error of type + FLOW_CONTROL_ERROR. + +
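+
+ As a non-normative sketch of the adjustment rule above, the following Go function (the
+ name applyInitialWindowSize is invented for the example) applies the difference between
+ the new and old SETTINGS_INITIAL_WINDOW_SIZE to every stream send window, allowing a
+ window to become negative.
+
+ package h2sketch
+
+ // applyInitialWindowSize adjusts every stream send window by the difference
+ // between the new and old SETTINGS_INITIAL_WINDOW_SIZE values. Windows may
+ // become negative; the sender must then wait for WINDOW_UPDATE frames before
+ // sending further flow controlled frames on those streams.
+ func applyInitialWindowSize(streamWindows map[uint32]int64, oldSize, newSize int64) {
+ 	delta := newSize - oldSize
+ 	for id := range streamWindows {
+ 		streamWindows[id] += delta // may go below zero
+ 	}
+ }
+
+ Applied to the example above: after sending 60KB (61,440 octets) against the default
+ 65,535-octet window, 4,095 octets remain; lowering the initial size to 16KB (16,384
+ octets) applies a delta of -49,151, leaving the stream window at -45,056 octets, that is,
+ -44KB.
+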
    + +
    + + A receiver that wishes to use a smaller flow control window than the current size can + send a new SETTINGS frame. However, the receiver MUST be prepared to + receive data that exceeds this window size, since the sender might send data that + exceeds the lower limit prior to processing the SETTINGS frame. + + + After sending a SETTINGS frame that reduces the initial flow control window size, a + receiver has two options for handling streams that exceed flow control limits: + + + The receiver can immediately send RST_STREAM with + FLOW_CONTROL_ERROR error code for the affected streams. + + + The receiver can accept the streams and tolerate the resulting head of line + blocking, sending WINDOW_UPDATE frames as it consumes data. + + + +
    +
    + +
    + + The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can + be sent on an existing stream, as long as the preceding frame is on the same stream and is + a HEADERS, PUSH_PROMISE or CONTINUATION frame without the + END_HEADERS flag set. + + +
    + +
    + + The CONTINUATION frame payload contains a header block + fragment. + + + + The CONTINUATION frame defines the following flag: + + + + Bit 3 being set indicates that this frame ends a header + block. + + + If the END_HEADERS bit is not set, this frame MUST be followed by another + CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or + a frame on a different stream as a connection + error of type PROTOCOL_ERROR. + + + + + + + The CONTINUATION frame changes the connection state as defined in . + + + + CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received + whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. + + + + A CONTINUATION frame MUST be preceded by a HEADERS, + PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A + recipient that observes violation of this rule MUST respond with a connection error of type + PROTOCOL_ERROR. + +
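+
+ A non-normative Go sketch of the receiver-side sequencing rule above: once a HEADERS,
+ PUSH_PROMISE, or CONTINUATION frame arrives without END_HEADERS, the only frame that may
+ follow is a CONTINUATION on the same stream. The headerBlockState type is invented for
+ this illustration.
+
+ package h2sketch
+
+ import "errors"
+
+ // headerBlockState remembers whether a header block is still open
+ // (END_HEADERS not yet seen) and on which stream it started.
+ type headerBlockState struct {
+ 	open     bool
+ 	streamID uint32
+ }
+
+ // checkFrame enforces the CONTINUATION sequencing rule for an incoming frame.
+ func (s *headerBlockState) checkFrame(frameType string, streamID uint32, endHeaders bool) error {
+ 	if s.open {
+ 		if frameType != "CONTINUATION" || streamID != s.streamID {
+ 			return errors.New("connection error PROTOCOL_ERROR: expected CONTINUATION on the same stream")
+ 		}
+ 	}
+ 	switch frameType {
+ 	case "HEADERS", "PUSH_PROMISE", "CONTINUATION":
+ 		s.open = !endHeaders
+ 		s.streamID = streamID
+ 	}
+ 	return nil
+ }
+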
    +
    + +
    + + Error codes are 32-bit fields that are used in RST_STREAM and + GOAWAY frames to convey the reasons for the stream or connection error. + + + + Error codes share a common code space. Some error codes apply only to either streams or the + entire connection and have no defined semantics in the other context. + + + + The following error codes are defined: + + + The associated condition is not as a result of an error. For example, a + GOAWAY might include this code to indicate graceful shutdown of a + connection. + + + The endpoint detected an unspecific protocol error. This error is for use when a more + specific error code is not available. + + + The endpoint encountered an unexpected internal error. + + + The endpoint detected that its peer violated the flow control protocol. + + + The endpoint sent a SETTINGS frame, but did not receive a response in a + timely manner. See Settings Synchronization. + + + The endpoint received a frame after a stream was half closed. + + + The endpoint received a frame with an invalid size. + + + The endpoint refuses the stream prior to performing any application processing, see + for details. + + + Used by the endpoint to indicate that the stream is no longer needed. + + + The endpoint is unable to maintain the header compression context for the connection. + + + The connection established in response to a CONNECT + request was reset or abnormally closed. + + + The endpoint detected that its peer is exhibiting a behavior that might be generating + excessive load. + + + The underlying transport has properties that do not meet minimum security + requirements (see ). + + + + + Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be + treated by an implementation as being equivalent to INTERNAL_ERROR. + +
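+
+ For reference, the error codes above can be rendered as simple 32-bit constants; this
+ non-normative Go sketch uses the code points listed in the error code registry later in
+ this document.
+
+ package h2sketch
+
+ // HTTP/2 error codes used in RST_STREAM and GOAWAY frames.
+ const (
+ 	NoError            uint32 = 0x0
+ 	ProtocolError      uint32 = 0x1
+ 	InternalError      uint32 = 0x2
+ 	FlowControlError   uint32 = 0x3
+ 	SettingsTimeout    uint32 = 0x4
+ 	StreamClosed       uint32 = 0x5
+ 	FrameSizeError     uint32 = 0x6
+ 	RefusedStream      uint32 = 0x7
+ 	Cancel             uint32 = 0x8
+ 	CompressionError   uint32 = 0x9
+ 	ConnectError       uint32 = 0xa
+ 	EnhanceYourCalm    uint32 = 0xb
+ 	InadequateSecurity uint32 = 0xc
+ )
+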
    + +
    + + HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means + that, from the application perspective, the features of the protocol are largely + unchanged. To achieve this, all request and response semantics are preserved, although the + syntax of conveying those semantics has changed. + + + Thus, the specification and requirements of HTTP/1.1 Semantics and Content , Conditional Requests , Range Requests , Caching and Authentication are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax + and Routing , such as the HTTP and HTTPS URI schemes, are also + applicable in HTTP/2, but the expression of those semantics for this protocol are defined + in the sections below. + + +
    + + A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on + the same stream as the request. + + + An HTTP message (request or response) consists of: + + + for a response only, zero or more HEADERS frames (each followed by zero + or more CONTINUATION frames) containing the message headers of + informational (1xx) HTTP responses (see and ), + and + + + one HEADERS frame (followed by zero or more CONTINUATION + frames) containing the message headers (see ), and + + + zero or more DATA frames containing the message payload (see ), and + + + optionally, one HEADERS frame, followed by zero or more + CONTINUATION frames containing the trailer-part, if present (see ). + + + The last frame in the sequence bears an END_STREAM flag, noting that a + HEADERS frame bearing the END_STREAM flag can be followed by + CONTINUATION frames that carry any remaining portions of the header block. + + + Other frames (from any stream) MUST NOT occur between either HEADERS frame + and any CONTINUATION frames that might follow. + + + + Trailing header fields are carried in a header block that also terminates the stream. + That is, a sequence starting with a HEADERS frame, followed by zero or more + CONTINUATION frames, where the HEADERS frame bears an + END_STREAM flag. Header blocks after the first that do not terminate the stream are not + part of an HTTP request or response. + + + A HEADERS frame (and associated CONTINUATION frames) can + only appear at the start or end of a stream. An endpoint that receives a + HEADERS frame without the END_STREAM flag set after receiving a final + (non-informational) status code MUST treat the corresponding request or response as malformed. + + + + An HTTP request/response exchange fully consumes a single stream. A request starts with + the HEADERS frame that puts the stream into an "open" state. The request + ends with a frame bearing END_STREAM, which causes the stream to become "half closed + (local)" for the client and "half closed (remote)" for the server. A response starts with + a HEADERS frame and ends with a frame bearing END_STREAM, which places the + stream in the "closed" state. + + + +
    + + HTTP/2 removes support for the 101 (Switching Protocols) informational status code + (). + + + The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol. + Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate + their use (see ). + +
    + +
    + + HTTP header fields carry information as a series of key-value pairs. For a listing of + registered HTTP headers, see the Message Header Field Registry maintained at . + + +
    + + While HTTP/1.x used the message start-line (see ) to convey the target URI and method of the request, and the + status code for the response, HTTP/2 uses special pseudo-header fields beginning with + ':' character (ASCII 0x3a) for this purpose. + + + Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate + pseudo-header fields other than those defined in this document. + + + Pseudo-header fields are only valid in the context in which they are defined. + Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header + fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST + NOT appear in trailers. Endpoints MUST treat a request or response that contains + undefined or invalid pseudo-header fields as malformed. + + + Just as in HTTP/1.x, header field names are strings of ASCII characters that are + compared in a case-insensitive fashion. However, header field names MUST be converted + to lowercase prior to their encoding in HTTP/2. A request or response containing + uppercase header field names MUST be treated as malformed. + + + All pseudo-header fields MUST appear in the header block before regular header fields. + Any request or response that contains a pseudo-header field that appears in a header + block after a regular header field MUST be treated as malformed. + +
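+
+ A non-normative Go sketch of the checks implied above for a decoded request header list:
+ field names must be lowercase, pseudo-header fields must come from the defined set, and
+ all pseudo-header fields must precede regular fields. The function name and structure are
+ invented for this illustration.
+
+ package h2sketch
+
+ import (
+ 	"errors"
+ 	"strings"
+ )
+
+ var requestPseudo = map[string]bool{
+ 	":method": true, ":scheme": true, ":authority": true, ":path": true,
+ }
+
+ // validateHeaderBlock applies the pseudo-header rules to request header
+ // field names, in the order the fields appeared in the header block.
+ func validateHeaderBlock(names []string) error {
+ 	sawRegular := false
+ 	for _, name := range names {
+ 		if name != strings.ToLower(name) {
+ 			return errors.New("malformed: uppercase header field name")
+ 		}
+ 		if strings.HasPrefix(name, ":") {
+ 			if sawRegular {
+ 				return errors.New("malformed: pseudo-header after a regular header field")
+ 			}
+ 			if !requestPseudo[name] {
+ 				return errors.New("malformed: undefined pseudo-header field")
+ 			}
+ 			continue
+ 		}
+ 		sawRegular = true
+ 	}
+ 	return nil
+ }
+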
    + +
+ + HTTP/2 does not use the Connection header field to + indicate connection-specific header fields; in this protocol, connection-specific + metadata is conveyed by other means. An endpoint MUST NOT generate an HTTP/2 message + containing connection-specific header fields; any message containing + connection-specific header fields MUST be treated as malformed. + + + This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need + to remove any header fields nominated by the Connection header field, along with the + Connection header field itself. Such intermediaries SHOULD also remove other + connection-specific header fields, such as Keep-Alive, Proxy-Connection, + Transfer-Encoding and Upgrade, even if they are not nominated by Connection. + + + One exception to this is the TE header field, which MAY be present in an HTTP/2 + request, but when it is MUST NOT contain any value other than "trailers". + + + + + HTTP/2 purposefully does not support upgrade to another protocol. The handshake + methods described in are believed sufficient to + negotiate the use of alternative protocols. + + +
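+
+ A non-normative Go sketch of the translation step described above, as an intermediary
+ converting HTTP/1.x to HTTP/2 might perform it. It drops Connection and any fields it
+ nominates, the other hop-by-hop fields listed above, and any TE value other than
+ "trailers". The field map (one value per lowercased name) is a simplification invented
+ for the example.
+
+ package h2sketch
+
+ import "strings"
+
+ // Connection-specific fields that never appear in an HTTP/2 message.
+ var connectionSpecific = map[string]bool{
+ 	"connection": true, "keep-alive": true, "proxy-connection": true,
+ 	"transfer-encoding": true, "upgrade": true,
+ }
+
+ // sanitize removes connection-specific header fields from an HTTP/1.x
+ // field map (lowercased names) before re-encoding the message as HTTP/2.
+ func sanitize(fields map[string]string) {
+ 	// Remove any fields nominated by the Connection header field itself.
+ 	for _, name := range strings.Split(fields["connection"], ",") {
+ 		delete(fields, strings.ToLower(strings.TrimSpace(name)))
+ 	}
+ 	for name := range connectionSpecific {
+ 		delete(fields, name)
+ 	}
+ 	// TE is permitted only with the value "trailers".
+ 	if te, ok := fields["te"]; ok && strings.TrimSpace(te) != "trailers" {
+ 		delete(fields, "te")
+ 	}
+ }
+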
    + +
    + + The following pseudo-header fields are defined for HTTP/2 requests: + + + + The :method pseudo-header field includes the HTTP + method (). + + + + + The :scheme pseudo-header field includes the scheme + portion of the target URI (). + + + :scheme is not restricted to http and https schemed URIs. A + proxy or gateway can translate requests for non-HTTP schemes, enabling the use + of HTTP to interact with non-HTTP services. + + + + + The :authority pseudo-header field includes the + authority portion of the target URI (). The authority MUST NOT include the deprecated userinfo subcomponent for http + or https schemed URIs. + + + To ensure that the HTTP/1.1 request line can be reproduced accurately, this + pseudo-header field MUST be omitted when translating from an HTTP/1.1 request + that has a request target in origin or asterisk form (see ). Clients that generate + HTTP/2 requests directly SHOULD use the :authority pseudo-header + field instead of the Host header field. An + intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by + copying the value of the :authority pseudo-header + field. + + + + + The :path pseudo-header field includes the path and + query parts of the target URI (the path-absolute + production from and optionally a '?' character + followed by the query production, see and ). A request in asterisk form includes the value '*' for the + :path pseudo-header field. + + + This pseudo-header field MUST NOT be empty for http + or https URIs; http or + https URIs that do not contain a path component + MUST include a value of '/'. The exception to this rule is an OPTIONS request + for an http or https + URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ). + + + + + + All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory + pseudo-header fields is malformed. + + + HTTP/2 does not define a way to carry the version identifier that is included in the + HTTP/1.1 request line. + +
    + +
    + + For HTTP/2 responses, a single :status pseudo-header + field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all + responses, otherwise the response is malformed. + + + HTTP/2 does not define a way to carry the version or reason phrase that is included in + an HTTP/1.1 status line. + +
    + +
    + + The Cookie header field can carry a significant amount of + redundant data. + + + The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs"). + This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from + being separated into different name-value pairs. This can significantly reduce + compression efficiency as individual cookie-pairs are updated. + + + To allow for better compression efficiency, the Cookie header field MAY be split into + separate header fields, each with one or more cookie-pairs. If there are multiple + Cookie header fields after decompression, these MUST be concatenated into a single + octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ") + before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a + generic HTTP server application. + +
    + + Therefore, the following two lists of Cookie header fields are semantically + equivalent. + + +
    +
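+
+ A non-normative Go sketch of the recombination rule above; joinCookieCrumbs is an invented
+ helper name.
+
+ package h2sketch
+
+ import "strings"
+
+ // joinCookieCrumbs reassembles multiple Cookie header field values into the
+ // single field an HTTP/1.1 hop or a generic server application expects.
+ func joinCookieCrumbs(crumbs []string) string {
+ 	// 0x3B 0x20 is the two-octet "; " delimiter required above.
+ 	return strings.Join(crumbs, "; ")
+ }
+
+ For example, joinCookieCrumbs([]string{"a=b", "c=d", "e=f"}) yields "a=b; c=d; e=f".
+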
    + +
    + + A malformed request or response is one that is an otherwise valid sequence of HTTP/2 + frames, but is otherwise invalid due to the presence of extraneous frames, prohibited + header fields, the absence of mandatory header fields, or the inclusion of uppercase + header field names. + + + A request or response that includes an entity body can include a content-length header field. A request or response is also + malformed if the value of a content-length header field + does not equal the sum of the DATA frame payload lengths that form the + body. A response that is defined to have no payload, as described in , can have a non-zero + content-length header field, even though no content is + included in DATA frames. + + + Intermediaries that process HTTP requests or responses (i.e., any intermediary not + acting as a tunnel) MUST NOT forward a malformed request or response. Malformed + requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR. + + + For malformed requests, a server MAY send an HTTP response prior to closing or + resetting the stream. Clients MUST NOT accept a malformed response. Note that these + requirements are intended to protect against several types of common attacks against + HTTP; they are deliberately strict, because being permissive can expose + implementations to these vulnerabilities. + +
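+
+ A non-normative Go sketch of the content-length check described above, ignoring the
+ exception for responses that are defined to carry no payload. The function name and
+ representation of the declared value are invented for the example.
+
+ package h2sketch
+
+ // checkContentLength reports whether a declared content-length matches the
+ // octets carried in the DATA frames that form the body. declared is nil when
+ // no content-length header field was present.
+ func checkContentLength(declared *int64, dataFrameLengths []int64) bool {
+ 	if declared == nil {
+ 		return true
+ 	}
+ 	var sum int64
+ 	for _, n := range dataFrameLengths {
+ 		sum += n
+ 	}
+ 	return sum == *declared
+ }
+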
    +
    + +
    + + This section shows HTTP/1.1 requests and responses, with illustrations of equivalent + HTTP/2 requests and responses. + + + An HTTP GET request includes request header fields and no body and is therefore + transmitted as a single HEADERS frame, followed by zero or more + CONTINUATION frames containing the serialized block of request header + fields. The HEADERS frame in the following has both the END_HEADERS and + END_STREAM flags set; no CONTINUATION frames are sent: + + +
+   GET /resource HTTP/1.1           HEADERS
+   Host: example.org          ==>     + END_STREAM
+   Accept: image/jpeg                 + END_HEADERS
+                                        :method = GET
+                                        :scheme = https
+                                        :path = /resource
+                                        host = example.org
+                                        accept = image/jpeg
+]]>
    + + + Similarly, a response that includes only response header fields is transmitted as a + HEADERS frame (again, followed by zero or more + CONTINUATION frames) containing the serialized block of response header + fields. + + +
+   HTTP/1.1 304 Not Modified        HEADERS
+   ETag: "xyzzy"              ==>     + END_STREAM
+   Expires: Thu, 23 Jan ...           + END_HEADERS
+                                        :status = 304
+                                        etag = "xyzzy"
+                                        expires = Thu, 23 Jan ...
+]]>
    + + + An HTTP POST request that includes request header fields and payload data is transmitted + as one HEADERS frame, followed by zero or more + CONTINUATION frames containing the request header fields, followed by one + or more DATA frames, with the last CONTINUATION (or + HEADERS) frame having the END_HEADERS flag set and the final + DATA frame having the END_STREAM flag set: + + +
+   POST /resource HTTP/1.1          HEADERS
+   Host: example.org          ==>     - END_STREAM
+   Content-Type: image/jpeg           - END_HEADERS
+   Content-Length: 123                  :method = POST
+                                        :path = /resource
+   {binary data}                        :scheme = https
+
+                                      CONTINUATION
+                                        + END_HEADERS
+                                        content-type = image/jpeg
+                                        host = example.org
+                                        content-length = 123
+
+                                      DATA
+                                        + END_STREAM
+                                      {binary data}
+]]>
+
+ Note that data contributing to any given header field could be spread between header
+ block fragments. The allocation of header fields to frames in this example is
+ illustrative only.
+
    + + + A response that includes header fields and payload data is transmitted as a + HEADERS frame, followed by zero or more CONTINUATION + frames, followed by one or more DATA frames, with the last + DATA frame in the sequence having the END_STREAM flag set: + + +
+   HTTP/1.1 200 OK                  HEADERS
+   Content-Type: image/jpeg   ==>     - END_STREAM
+   Content-Length: 123                + END_HEADERS
+                                        :status = 200
+   {binary data}                        content-type = image/jpeg
+                                        content-length = 123
+
+                                      DATA
+                                        + END_STREAM
+                                      {binary data}
+]]>
    + + + Trailing header fields are sent as a header block after both the request or response + header block and all the DATA frames have been sent. The + HEADERS frame starting the trailers header block has the END_STREAM flag + set. + + +
+   HTTP/1.1 200 OK                  HEADERS
+   Content-Type: image/jpeg   ==>     - END_STREAM
+   Transfer-Encoding: chunked         + END_HEADERS
+   Trailer: Foo                         :status = 200
+                                        content-length = 123
+   123                                  content-type = image/jpeg
+   {binary data}                        trailer = Foo
+   0
+   Foo: bar                           DATA
+                                        - END_STREAM
+                                      {binary data}
+
+                                      HEADERS
+                                        + END_STREAM
+                                        + END_HEADERS
+                                        foo = bar
+]]>
    + + +
    + + An informational response using a 1xx status code other than 101 is transmitted as a + HEADERS frame, followed by zero or more CONTINUATION + frames: + + - END_STREAM + + END_HEADERS + :status = 103 + extension-field = bar +]]> +
    +
    + +
    + + In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error + occurs, because there is no means to determine the nature of the error. It is possible + that some server processing occurred prior to the error, which could result in + undesirable effects if the request were reattempted. + + + HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has + not been processed: + + + The GOAWAY frame indicates the highest stream number that might have + been processed. Requests on streams with higher numbers are therefore guaranteed to + be safe to retry. + + + The REFUSED_STREAM error code can be included in a + RST_STREAM frame to indicate that the stream is being closed prior to + any processing having occurred. Any request that was sent on the reset stream can + be safely retried. + + + + + Requests that have not been processed have not failed; clients MAY automatically retry + them, even those with non-idempotent methods. + + + A server MUST NOT indicate that a stream has not been processed unless it can guarantee + that fact. If frames that are on a stream are passed to the application layer for any + stream, then REFUSED_STREAM MUST NOT be used for that stream, and a + GOAWAY frame MUST include a stream identifier that is greater than or + equal to the given stream identifier. + + + In addition to these mechanisms, the PING frame provides a way for a + client to easily test a connection. Connections that remain idle can become broken as + some middleboxes (for instance, network address translators, or load balancers) silently + discard connection bindings. The PING frame allows a client to safely + test whether a connection is still active without sending a request. + +
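+
+ A non-normative Go sketch of the client-side decision implied by the two guarantees above;
+ safeToRetry is an invented helper name.
+
+ package h2sketch
+
+ // safeToRetry reports whether a request can be retried on a new connection
+ // without risk of it already having been processed.
+ //
+ //   goAwayLastStreamID is the last stream identifier from a received GOAWAY
+ //   frame; when the connection terminated without a GOAWAY, use 2^31-1, the
+ //   effective value defined above, so nothing is considered safe.
+ //   refused is true when the stream was reset with REFUSED_STREAM.
+ func safeToRetry(streamID, goAwayLastStreamID uint32, refused bool) bool {
+ 	return refused || streamID > goAwayLastStreamID
+ }
+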
    +
    + +
    + + HTTP/2 allows a server to pre-emptively send (or "push") responses (along with + corresponding "promised" requests) to a client in association with a previous + client-initiated request. This can be useful when the server knows the client will need + to have those responses available in order to fully process the response to the original + request. + + + + Pushing additional message exchanges in this fashion is optional, and is negotiated + between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set + to 0 to indicate that server push is disabled. + + + Promised requests MUST be cacheable (see ), MUST be safe (see ) and MUST NOT include a request body. Clients that receive a + promised request that is not cacheable, unsafe or that includes a request body MUST + reset the stream with a stream error of type + PROTOCOL_ERROR. + + + Pushed responses that are cacheable (see ) can be stored by the client, if it implements a HTTP + cache. Pushed responses are considered successfully validated on the origin server (e.g., + if the "no-cache" cache response directive is present) while the stream identified by the + promised stream ID is still open. + + + Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY + be made available to the application separately. + + + An intermediary can receive pushes from the server and choose not to forward them on to + the client. In other words, how to make use of the pushed information is up to that + intermediary. Equally, the intermediary might choose to make additional pushes to the + client, without any action taken by the server. + + + A client cannot push. Thus, servers MUST treat the receipt of a + PUSH_PROMISE frame as a connection + error of type PROTOCOL_ERROR. Clients MUST reject any attempt to + change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating + the message as a connection error of type + PROTOCOL_ERROR. + + +
    + + Server push is semantically equivalent to a server responding to a request; however, in + this case that request is also sent by the server, as a PUSH_PROMISE + frame. + + + The PUSH_PROMISE frame includes a header block that contains a complete + set of request header fields that the server attributes to the request. It is not + possible to push a response to a request that includes a request body. + + + + Pushed responses are always associated with an explicit request from the client. The + PUSH_PROMISE frames sent by the server are sent on that explicit + request's stream. The PUSH_PROMISE frame also includes a promised stream + identifier, chosen from the stream identifiers available to the server (see ). + + + + The header fields in PUSH_PROMISE and any subsequent + CONTINUATION frames MUST be a valid and complete set of request header fields. The server MUST include a method in + the :method header field that is safe and cacheable. If a + client receives a PUSH_PROMISE that does not include a complete and valid + set of header fields, or the :method header field identifies + a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR. + + + + The server SHOULD send PUSH_PROMISE () + frames prior to sending any frames that reference the promised responses. This avoids a + race where clients issue requests prior to receiving any PUSH_PROMISE + frames. + + + For example, if the server receives a request for a document containing embedded links + to multiple image files, and the server chooses to push those additional images to the + client, sending push promises before the DATA frames that contain the + image links ensures that the client is able to see the promises before discovering + embedded links. Similarly, if the server pushes responses referenced by the header block + (for instance, in Link header fields), sending the push promises before sending the + header block ensures that clients do not request them. + + + + PUSH_PROMISE frames MUST NOT be sent by the client. + + + PUSH_PROMISE frames can be sent by the server in response to any + client-initiated stream, but the stream MUST be in either the "open" or "half closed + (remote)" state with respect to the server. PUSH_PROMISE frames are + interspersed with the frames that comprise a response, though they cannot be + interspersed with HEADERS and CONTINUATION frames that + comprise a single header block. + + + Sending a PUSH_PROMISE frame creates a new stream and puts the stream + into the “reserved (local)” state for the server and the “reserved (remote)” state for + the client. + +
    + +
    + + After sending the PUSH_PROMISE frame, the server can begin delivering the + pushed response as a response on a server-initiated + stream that uses the promised stream identifier. The server uses this stream to + transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed" + to the client after the initial HEADERS frame is sent. + + + + Once a client receives a PUSH_PROMISE frame and chooses to accept the + pushed response, the client SHOULD NOT issue any requests for the promised response + until after the promised stream has closed. + + + + If the client determines, for any reason, that it does not wish to receive the pushed + response from the server, or if the server takes too long to begin sending the promised + response, the client can send an RST_STREAM frame, using either the + CANCEL or REFUSED_STREAM codes, and referencing the pushed + stream's identifier. + + + A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the + number of responses that can be concurrently pushed by a server. Advertising a + SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by + preventing the server from creating the necessary streams. This does not prohibit a + server from sending PUSH_PROMISE frames; clients need to reset any + promised streams that are not wanted. + + + + Clients receiving a pushed response MUST validate that either the server is + authoritative (see ), or the proxy that provided the pushed + response is configured for the corresponding request. For example, a server that offers + a certificate for only the example.com DNS-ID or Common Name + is not permitted to push a response for https://www.example.org/doc. + + + The response for a PUSH_PROMISE stream begins with a + HEADERS frame, which immediately puts the stream into the “half closed + (remote)” state for the server and “half closed (local)” state for the client, and ends + with a frame bearing END_STREAM, which places the stream in the "closed" state. + + + The client never sends a frame with the END_STREAM flag for a server push. + + + +
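+
+ A non-normative Go sketch of the client-side controls described above: the settings a
+ client can advertise to disable or starve push, and a helper that declines a single
+ promised stream with RST_STREAM. The type and function names are invented for the
+ example; the CANCEL code point comes from the error code registry in this document.
+
+ package h2sketch
+
+ // Settings a client might advertise to control server push.
+ type clientSettings struct {
+ 	EnablePush           uint32 // SETTINGS_ENABLE_PUSH: 0 disables push entirely
+ 	MaxConcurrentStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS: 0 prevents push streams from being created
+ }
+
+ type streamResetter interface {
+ 	WriteRSTStream(streamID uint32, errCode uint32) error
+ }
+
+ const cancel = 0x8 // CANCEL error code
+
+ // declinePush refuses a single promised stream the client does not want.
+ func declinePush(fr streamResetter, promisedStreamID uint32) error {
+ 	return fr.WriteRSTStream(promisedStreamID, cancel)
+ }
+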
    + +
    + +
    + + In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host. + CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin + server for the purposes of interacting with https resources. + + + In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to + a remote host, for similar purposes. The HTTP header field mapping works as defined in + Request Header Fields, with a few + differences. Specifically: + + + The :method header field is set to CONNECT. + + + The :scheme and :path header + fields MUST be omitted. + + + The :authority header field contains the host and port to + connect to (equivalent to the authority-form of the request-target of CONNECT + requests, see ). + + + + + A proxy that supports CONNECT establishes a TCP connection to + the server identified in the :authority header field. Once + this connection is successfully established, the proxy sends a HEADERS + frame containing a 2xx series status code to the client, as defined in . + + + After the initial HEADERS frame sent by each peer, all subsequent + DATA frames correspond to data sent on the TCP connection. The payload of + any DATA frames sent by the client is transmitted by the proxy to the TCP + server; data received from the TCP server is assembled into DATA frames by + the proxy. Frame types other than DATA or stream management frames + (RST_STREAM, WINDOW_UPDATE, and PRIORITY) + MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received. + + + The TCP connection can be closed by either peer. The END_STREAM flag on a + DATA frame is treated as being equivalent to the TCP FIN bit. A client is + expected to send a DATA frame with the END_STREAM flag set after receiving + a frame bearing the END_STREAM flag. A proxy that receives a DATA frame + with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP + segment. A proxy that receives a TCP segment with the FIN bit set sends a + DATA frame with the END_STREAM flag set. Note that the final TCP segment + or DATA frame could be empty. + + + A TCP connection error is signaled with RST_STREAM. A proxy treats any + error in the TCP connection, which includes receiving a TCP segment with the RST bit set, + as a stream error of type + CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the + RST bit set if it detects an error with the stream or the HTTP/2 connection. + +
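+
+ A non-normative Go sketch of the CONNECT mapping above: only :method and :authority are
+ sent, and :scheme and :path are omitted. The helper name is invented for the example.
+
+ package h2sketch
+
+ // connectRequestHeaders builds the header list for an HTTP/2 CONNECT request
+ // to the given host and port (for example "example.com:443").
+ func connectRequestHeaders(hostport string) [][2]string {
+ 	return [][2]string{
+ 		{":method", "CONNECT"},
+ 		{":authority", hostport},
+ 		// :scheme and :path MUST be omitted for CONNECT.
+ 	}
+ }
+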
    +
    + +
    + + This section outlines attributes of the HTTP protocol that improve interoperability, reduce + exposure to known security vulnerabilities, or reduce the potential for implementation + variation. + + +
    + + HTTP/2 connections are persistent. For best performance, it is expected clients will not + close connections until it is determined that no further communication with a server is + necessary (for example, when a user navigates away from a particular web page), or until + the server closes the connection. + + + Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair, + where host is derived from a URI, a selected alternative + service, or a configured proxy. + + + A client can create additional connections as replacements, either to replace connections + that are near to exhausting the available stream + identifier space, to refresh the keying material for a TLS connection, or to + replace connections that have encountered errors. + + + A client MAY open multiple connections to the same IP address and TCP port using different + Server Name Indication values or to provide different TLS + client certificates, but SHOULD avoid creating multiple connections with the same + configuration. + + + Servers are encouraged to maintain open connections for as long as possible, but are + permitted to terminate idle connections if necessary. When either endpoint chooses to + close the transport-layer TCP connection, the terminating endpoint SHOULD first send a + GOAWAY () frame so that both endpoints can reliably + determine whether previously sent frames have been processed and gracefully complete or + terminate any necessary remaining tasks. + + +
+ + Connections that are made to an origin server, either directly or through a tunnel + created using the CONNECT method, MAY be reused for + requests with multiple different URI authority components. A connection can be reused + as long as the origin server is authoritative. For + http resources, this depends on the host having resolved to + the same IP address. + + + For https resources, connection reuse additionally depends + on having a certificate that is valid for the host in the URI. An origin server might + offer a certificate with multiple subjectAltName attributes, + or names with wildcards, one of which is valid for the authority in the URI. For + example, a certificate with a subjectAltName of *.example.com might permit the use of the same connection for + requests to URIs starting with https://a.example.com/ and + https://b.example.com/. + + + In some deployments, reusing a connection for multiple origins can result in requests + being directed to the wrong origin server. For example, TLS termination might be + performed by a middlebox that uses the TLS Server Name Indication + (SNI) extension to select an origin server. This means that it is possible + for clients to send confidential information to servers that might not be the intended + target for the request, even though the server is otherwise authoritative. + + + A server that does not wish clients to reuse connections can indicate that it is not + authoritative for a request by sending a 421 (Misdirected Request) status code in response + to the request (see ). + + + A client that is configured to use a proxy over HTTP/2 directs requests to that proxy + through a single connection. That is, all requests sent via a proxy reuse the + connection to the proxy. +
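+
+ A non-normative Go sketch of the certificate part of the reuse test for https resources,
+ using Go's crypto/x509 package. A complete implementation would also confirm that the new
+ host resolves to an address the connection already uses; canCoalesce is an invented name.
+
+ package h2sketch
+
+ import "crypto/x509"
+
+ // canCoalesce reports whether an existing https connection, authenticated
+ // with peerCert, may be reused for a request to another host, based solely
+ // on certificate validity (including wildcard subjectAltName entries).
+ func canCoalesce(peerCert *x509.Certificate, newHost string) bool {
+ 	return peerCert.VerifyHostname(newHost) == nil
+ }
+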
    + +
    + + The 421 (Misdirected Request) status code indicates that the request was directed at a + server that is not able to produce a response. This can be sent by a server that is not + configured to produce responses for the combination of scheme and authority that are + included in the request URI. + + + Clients receiving a 421 (Misdirected Request) response from a server MAY retry the + request - whether the request method is idempotent or not - over a different connection. + This is possible if a connection is reused () or if an alternative + service is selected (). + + + This status code MUST NOT be generated by proxies. + + + A 421 response is cacheable by default; i.e., unless otherwise indicated by the method + definition or explicit cache controls (see ). + +
    +
    + +
    + + Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over + TLS. The general TLS usage guidance in SHOULD be followed, with + some additional restrictions that are specific to HTTP/2. + + + + An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on + feature set and cipher suite described in this section. Due to implementation + limitations, it might not be possible to fail TLS negotiation. An endpoint MUST + immediately terminate an HTTP/2 connection that does not meet these minimum requirements + with a connection error of type + INADEQUATE_SECURITY. + + +
    + + The TLS implementation MUST support the Server Name Indication + (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when + negotiating TLS. + + + The TLS implementation MUST disable compression. TLS compression can lead to the + exposure of information that would not otherwise be revealed . + Generic compression is unnecessary since HTTP/2 provides compression features that are + more aware of context and therefore likely to be more appropriate for use for + performance, security or other reasons. + + + The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS + renegotiation as a connection error of type + PROTOCOL_ERROR. Note that disabling renegotiation can result in + long-lived connections becoming unusable due to limits on the number of messages the + underlying cipher suite can encipher. + + + A client MAY use renegotiation to provide confidentiality protection for client + credentials offered in the handshake, but any renegotiation MUST occur prior to sending + the connection preface. A server SHOULD request a client certificate if it sees a + renegotiation request immediately after establishing a connection. + + + This effectively prevents the use of renegotiation in response to a request for a + specific protected resource. A future specification might provide a way to support this + use case. + +
    + +
+ + The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST + only be used with cipher suites that have ephemeral key exchange, such as the ephemeral Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange MUST + have a minimum size of 2048 bits for DHE or security level of 128 bits for ECDHE. + Clients MUST accept DHE sizes of up to 4096 bits. HTTP/2 MUST NOT be used with cipher + suites that use stream or block ciphers. Authenticated Encryption with Additional Data + (AEAD) modes, such as the Galois/Counter Mode (GCM) for + AES, are acceptable. + + + The effect of these restrictions is that TLS 1.2 implementations could have + non-intersecting sets of available cipher suites, since these prevent the use of the + cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of + HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with P256 . + + + Clients MAY advertise support of cipher suites that are prohibited by the above + restrictions in order to allow for connection to servers that do not support HTTP/2. + This enables a fallback to protocols without these constraints without the additional + latency imposed by using a separate connection for fallback. +
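+
+ A non-normative sketch of a client TLS configuration consistent with this section,
+ assuming Go's crypto/tls package: TLS 1.2 as the minimum version, "h2" offered via ALPN,
+ renegotiation disabled, and ECDHE AEAD suites including the mandatory
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256. (crypto/tls does not implement TLS compression,
+ satisfying that requirement implicitly.)
+
+ package h2sketch
+
+ import "crypto/tls"
+
+ // h2TLSConfig sketches client-side TLS settings compatible with the
+ // restrictions in this section.
+ func h2TLSConfig(serverName string) *tls.Config {
+ 	return &tls.Config{
+ 		ServerName:    serverName,           // SNI is mandatory for HTTP/2 clients
+ 		MinVersion:    tls.VersionTLS12,     // TLS 1.2 or higher
+ 		NextProtos:    []string{"h2"},       // ALPN identifier for HTTP/2 over TLS
+ 		Renegotiation: tls.RenegotiateNever, // renegotiation is prohibited
+ 		CipherSuites: []uint16{
+ 			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // mandatory-to-implement suite
+ 			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ 		},
+ 	}
+ }
+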
    +
    +
    + +
    +
    + + HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is + authoritative in providing a given response, see . This relies on local name resolution for the "http" + URI scheme, and the authenticated server identity for the "https" scheme (see ). + +
    + +
+ + In a cross-protocol attack, an attacker causes a client to initiate a transaction in one + protocol toward a server that understands a different protocol. An attacker might be able + to cause the transaction to appear as a valid transaction in the second protocol. In + combination with the capabilities of the web context, this can be used to interact with + poorly protected servers in private networks. + + + Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient + protection against cross protocol attacks. ALPN provides a positive indication that a + server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based + protocols. + + + The encryption in TLS makes it difficult for attackers to control the data which could be + used in a cross-protocol attack on a cleartext protocol. + + + The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks. + The connection preface contains a string that is + designed to confuse HTTP/1.1 servers, but no special protection is offered for other + protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an + Upgrade header field in addition to the client connection preface could be exposed to a + cross-protocol attack. +
    + +
    + + HTTP/2 header field names and values are encoded as sequences of octets with a length + prefix. This enables HTTP/2 to carry any string of octets as the name or value of a + header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1 + directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might + exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal + header fields, extra header fields, or even new messages that are entirely falsified. + + + Header field names or values that contain characters not permitted by HTTP/1.1, including + carriage return (ASCII 0xd) or line feed (ASCII 0xa) MUST NOT be translated verbatim by an + intermediary, as stipulated in . + + + Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker. + Intermediaries that perform translation to HTTP/2 MUST remove any instances of the obs-fold production from header field values. + +
    + +
    + + Pushed responses do not have an explicit request from the client; the request + is provided by the server in the PUSH_PROMISE frame. + + + Caching responses that are pushed is possible based on the guidance provided by the origin + server in the Cache-Control header field. However, this can cause issues if a single + server hosts more than one tenant. For example, a server might offer multiple users each + a small portion of its URI space. + + + Where multiple tenants share space on the same server, that server MUST ensure that + tenants are not able to push representations of resources that they do not have authority + over. Failure to enforce this would allow a tenant to provide a representation that would + be served out of cache, overriding the actual representation that the authoritative tenant + provides. + + + Pushed responses for which an origin server is not authoritative (see + ) are never cached or used. + +
    + +
    + + An HTTP/2 connection can demand a greater commitment of resources to operate than a + HTTP/1.1 connection. The use of header compression and flow control depend on a + commitment of resources for storing a greater amount of state. Settings for these + features ensure that memory commitments for these features are strictly bounded. + + + The number of PUSH_PROMISE frames is not constrained in the same fashion. + A client that accepts server push SHOULD limit the number of streams it allows to be in + the "reserved (remote)" state. Excessive number of server push streams can be treated as + a stream error of type + ENHANCE_YOUR_CALM. + + + Processing capacity cannot be guarded as effectively as state capacity. + + + The SETTINGS frame can be abused to cause a peer to expend additional + processing time. This might be done by pointlessly changing SETTINGS parameters, setting + multiple undefined parameters, or changing the same setting multiple times in the same + frame. WINDOW_UPDATE or PRIORITY frames can be abused to + cause an unnecessary waste of resources. + + + Large numbers of small or empty frames can be abused to cause a peer to expend time + processing frame headers. Note however that some uses are entirely legitimate, such as + the sending of an empty DATA frame to end a stream. + + + Header compression also offers some opportunities to waste processing resources; see for more details on potential abuses. + + + Limits in SETTINGS parameters cannot be reduced instantaneously, which + leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In + particular, immediately after establishing a connection, limits set by a server are not + known to clients and could be exceeded without being an obvious protocol violation. + + + All these features - i.e., SETTINGS changes, small frames, header + compression - have legitimate uses. These features become a burden only when they are + used unnecessarily or to excess. + + + An endpoint that doesn't monitor this behavior exposes itself to a risk of denial of + service attack. Implementations SHOULD track the use of these features and set limits on + their use. An endpoint MAY treat activity that is suspicious as a connection error of type + ENHANCE_YOUR_CALM. + + +
+ + A large header block can cause an implementation to + commit a large amount of state. Header fields that are critical for routing can appear + toward the end of a header block, which prevents streaming of header fields to their + ultimate destination. For this and other reasons, such as ensuring cache correctness, an + endpoint might need to buffer the entire header block. Since there is no + hard limit to the size of a header block, some endpoints could be forced to commit a large + amount of available memory for header fields. + + + An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE to advise peers of + limits that might apply on the size of header blocks. This setting is only advisory, so + endpoints MAY choose to send header blocks that exceed this limit and risk having the + request or response being treated as malformed. This setting is specific to a connection, + so any request or response could encounter a hop with a lower, unknown limit. An + intermediary can attempt to avoid this problem by passing on values presented by + different peers, but they are not obligated to do so. + + + A server that receives a larger header block than it is willing to handle can send an + HTTP 431 (Request Header Fields Too Large) status code . A + client can discard responses that it cannot process. The header block MUST be processed + to ensure a consistent connection state, unless the connection is closed. +
    +
    + +
    + + HTTP/2 enables greater use of compression for both header fields () and entity bodies. Compression can allow an attacker to recover + secret data when it is compressed in the same context as data under attacker control. + + + There are demonstrable attacks on compression that exploit the characteristics of the web + (e.g., ). The attacker induces multiple requests containing + varying plaintext, observing the length of the resulting ciphertext in each, which + reveals a shorter length when a guess about the secret is correct. + + + Implementations communicating on a secure channel MUST NOT compress content that includes + both confidential and attacker-controlled data unless separate compression dictionaries + are used for each source of data. Compression MUST NOT be used if the source of data + cannot be reliably determined. Generic stream compression, such as that provided by TLS + MUST NOT be used with HTTP/2 (). + + + Further considerations regarding the compression of header fields are described in . + +
    + +
    + + Padding within HTTP/2 is not intended as a replacement for general purpose padding, such + as might be provided by TLS. Redundant padding could even be + counterproductive. Correct application can depend on having specific knowledge of the + data that is being padded. + + + To mitigate attacks that rely on compression, disabling or limiting compression might be + preferable to padding as a countermeasure. + + + Padding can be used to obscure the exact size of frame content, and is provided to + mitigate specific attacks within HTTP. For example, attacks where compressed content + includes both attacker-controlled plaintext and secret data (see for example, ). + + + Use of padding can result in less protection than might seem immediately obvious. At + best, padding only makes it more difficult for an attacker to infer length information by + increasing the number of frames an attacker has to observe. Incorrectly implemented + padding schemes can be easily defeated. In particular, randomized padding with a + predictable distribution provides very little protection; similarly, padding payloads to a + fixed size exposes information as payload sizes cross the fixed size boundary, which could + be possible if an attacker can control plaintext. + + + Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding + for HEADERS and PUSH_PROMISE frames. A valid reason for an + intermediary to change the amount of padding of frames is to improve the protections that + padding provides. + +
    + +
    + + Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions + of a single client or server over time. This includes the value of settings, the manner + in which flow control windows are managed, the way priorities are allocated to streams, + timing of reactions to stimulus, and handling of any optional features. + + + As far as this creates observable differences in behavior, they could be used as a basis + for fingerprinting a specific client, as defined in . + +
    +
    + +
    + + A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation + (ALPN) Protocol IDs" registry established in . + + + This document establishes a registry for frame types, settings, and error codes. These new + registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section. + + + This document registers the HTTP2-Settings header field for + use in HTTP; and the 421 (Misdirected Request) status code. + + + This document registers the PRI method for use in HTTP, to avoid + collisions with the connection preface. + + +
    + + This document creates two registrations for the identification of HTTP/2 in the + "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in . + + + The "h2" string identifies HTTP/2 when used over TLS: + + HTTP/2 over TLS + 0x68 0x32 ("h2") + This document + + + + The "h2c" string identifies HTTP/2 when used over cleartext TCP: + + HTTP/2 over TCP + 0x68 0x32 0x63 ("h2c") + This document + + +
    + +
+ + This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame + Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under + either of the "IETF Review" or "IESG Approval" policies for + values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for + experimental use. + + + New entries in this registry require the following information: + + + A name or label for the frame type. + + + The 8-bit code assigned to the frame type. + + + A reference to a specification that includes a description of the frame layout, + its semantics and flags that the frame type uses, including any parts of the frame + that are conditionally present based on the value of flags. + + + + + The entries in the following table are registered by this document. + +
+ Frame Type      Code
+ DATA            0x0
+ HEADERS         0x1
+ PRIORITY        0x2
+ RST_STREAM      0x3
+ SETTINGS        0x4
+ PUSH_PROMISE    0x5
+ PING            0x6
+ GOAWAY          0x7
+ WINDOW_UPDATE   0x8
+ CONTINUATION    0x9
+
    + +
+ + This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry + manages a 16-bit space. The "HTTP/2 Settings" registry operates under the "Expert Review" policy for values in the range from 0x0000 to + 0xefff, with values between 0xf000 and 0xffff being reserved for experimental use. + + + New registrations are advised to provide the following information: + + + A symbolic name for the setting. Specifying a setting name is optional. + + + The 16-bit code assigned to the setting. + + + An initial value for the setting. + + + An optional reference to a specification that describes the use of the setting. + + + + + An initial set of setting registrations can be found in . + +
+ Name                     Code   Initial Value
+ HEADER_TABLE_SIZE        0x1    4096
+ ENABLE_PUSH              0x2    1
+ MAX_CONCURRENT_STREAMS   0x3    (infinite)
+ INITIAL_WINDOW_SIZE      0x4    65535
+ MAX_FRAME_SIZE           0x5    16384
+ MAX_HEADER_LIST_SIZE     0x6    (infinite)
+
    + +
    + + This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code" + registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the + "Expert Review" policy. + + + Registrations for error codes are required to include a description of the error code. An + expert reviewer is advised to examine new registrations for possible duplication with + existing error codes. Use of existing registrations is to be encouraged, but not + mandated. + + + New registrations are advised to provide the following information: + + + A name for the error code. Specifying an error code name is optional. + + + The 32-bit error code value. + + + A brief description of the error code semantics, longer if no detailed specification + is provided. + + + An optional reference for a specification that defines the error code. + + + + + The entries in the following table are registered by this document. + + + Name + Code + Description + Specification + NO_ERROR0x0 + Graceful shutdown + + PROTOCOL_ERROR0x1 + Protocol error detected + + INTERNAL_ERROR0x2 + Implementation fault + + FLOW_CONTROL_ERROR0x3 + Flow control limits exceeded + + SETTINGS_TIMEOUT0x4 + Settings not acknowledged + + STREAM_CLOSED0x5 + Frame received for closed stream + + FRAME_SIZE_ERROR0x6 + Frame size incorrect + + REFUSED_STREAM0x7 + Stream not processed + + CANCEL0x8 + Stream cancelled + + COMPRESSION_ERROR0x9 + Compression state not updated + + CONNECT_ERROR0xa + TCP connection error for CONNECT method + + ENHANCE_YOUR_CALM0xb + Processing capacity exceeded + + INADEQUATE_SECURITY0xc + Negotiated TLS parameters not acceptable + + + +
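The error-code table likewise maps onto a small constant block. The sketch below is illustrative; the names mirror those used by golang.org/x/net/http2 (several of which appear in the transport.go changes later in this patch), but the block is not an excerpt from that package.

```go
package main

import "fmt"

// ErrCode mirrors the 32-bit codes in the "HTTP/2 Error Code" registry.
type ErrCode uint32

const (
	ErrCodeNo                 ErrCode = 0x0 // NO_ERROR: graceful shutdown
	ErrCodeProtocol           ErrCode = 0x1 // PROTOCOL_ERROR
	ErrCodeInternal           ErrCode = 0x2 // INTERNAL_ERROR
	ErrCodeFlowControl        ErrCode = 0x3 // FLOW_CONTROL_ERROR
	ErrCodeSettingsTimeout    ErrCode = 0x4 // SETTINGS_TIMEOUT
	ErrCodeStreamClosed       ErrCode = 0x5 // STREAM_CLOSED
	ErrCodeFrameSize          ErrCode = 0x6 // FRAME_SIZE_ERROR
	ErrCodeRefusedStream      ErrCode = 0x7 // REFUSED_STREAM
	ErrCodeCancel             ErrCode = 0x8 // CANCEL
	ErrCodeCompression        ErrCode = 0x9 // COMPRESSION_ERROR
	ErrCodeConnect            ErrCode = 0xa // CONNECT_ERROR
	ErrCodeEnhanceYourCalm    ErrCode = 0xb // ENHANCE_YOUR_CALM
	ErrCodeInadequateSecurity ErrCode = 0xc // INADEQUATE_SECURITY
)

func main() {
	fmt.Printf("REFUSED_STREAM = 0x%x\n", uint32(ErrCodeRefusedStream))
}
```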
    + +
    + + This section registers the HTTP2-Settings header field in the + Permanent Message Header Field Registry. + + + HTTP2-Settings + + + http + + + standard + + + IETF + + + of this document + + + This header field is only used by an HTTP/2 client for Upgrade-based negotiation. + + + +
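Since the HTTP2-Settings header field carries a SETTINGS frame payload encoded as padding-free base64url, a short sketch makes the encoding concrete. The helper name `http2Settings` and the example setting value are made up for illustration; the assumed wire format is the 6-octet identifier/value encoding of SETTINGS entries.

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// http2Settings encodes SETTINGS entries (16-bit identifier, 32-bit value)
// as they would appear in a SETTINGS frame payload, then applies the
// padding-free base64url encoding used for the HTTP2-Settings header.
func http2Settings(settings map[uint16]uint32) string {
	buf := make([]byte, 0, 6*len(settings))
	for id, val := range settings {
		var entry [6]byte
		binary.BigEndian.PutUint16(entry[0:2], id)
		binary.BigEndian.PutUint32(entry[2:6], val)
		buf = append(buf, entry[:]...)
	}
	return base64.RawURLEncoding.EncodeToString(buf)
}

func main() {
	// Example: advertise SETTINGS_MAX_CONCURRENT_STREAMS (0x3) = 100 in an
	// Upgrade-based negotiation request.
	token := http2Settings(map[uint16]uint32{0x3: 100})
	fmt.Printf("GET / HTTP/1.1\r\nHost: example.com\r\nConnection: Upgrade, HTTP2-Settings\r\nUpgrade: h2c\r\nHTTP2-Settings: %s\r\n\r\n", token)
}
```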
    + +
    + + This section registers the PRI method in the HTTP Method + Registry (). + + + PRI + + + No + + + No + + + of this document + + + This method is never used by an actual client. This method will appear to be used + when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection + preface. + + + +
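The reservation of PRI can be demonstrated by feeding the HTTP/2 connection preface to an HTTP/1.1 request parser: it reads as a request with method PRI and target "*". The sketch below does exactly that with Go's net/http reader; it is a demonstration of the collision being avoided, not part of this change.

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"net/http"
)

// clientPreface is the fixed octet sequence that starts every HTTP/2
// connection. An HTTP/1.1 server or intermediary that receives it will
// parse a request using the reserved "PRI" method.
const clientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

func main() {
	req, err := http.ReadRequest(bufio.NewReader(bytes.NewReader([]byte(clientPreface))))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("method:", req.Method, "target:", req.RequestURI, "proto:", req.Proto)
}
```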
    + +
    + + This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext + Transfer Protocol (HTTP) Status Code Registry (). + + + + + 421 + + + Misdirected Request + + + of this document + + + +
    + +
    + +
    + + This document includes substantial input from the following individuals: + + + Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin + Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin + Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY + contributors). + + + Gabriel Montenegro and Willy Tarreau (Upgrade mechanism). + + + William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto + Peon, Rob Trace (Flow control). + + + Mike Bishop (Extensibility). + + + Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan + (Substantial editorial contributions). + + + Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp. + + + Alexey Melnikov was an editor of this document during 2013. + + + A substantial proportion of Martin's contribution was supported by Microsoft during his + employment there. + + + +
    +
    + + + + + + HPACK - Header Compression for HTTP/2 + + + + + + + + + + + + Transmission Control Protocol + + + University of Southern California (USC)/Information Sciences + Institute + + + + + + + + + + + Key words for use in RFCs to Indicate Requirement Levels + + + Harvard University +
    sob@harvard.edu
    +
    + +
    + + +
    + + + + + HTTP Over TLS + + + + + + + + + + Uniform Resource Identifier (URI): Generic + Syntax + + + + + + + + + + + + The Base16, Base32, and Base64 Data Encodings + + + + + + + + + Guidelines for Writing an IANA Considerations Section in RFCs + + + + + + + + + + + Augmented BNF for Syntax Specifications: ABNF + + + + + + + + + + + The Transport Layer Security (TLS) Protocol Version 1.2 + + + + + + + + + + + Transport Layer Security (TLS) Extensions: Extension Definitions + + + + + + + + + + Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension + + + + + + + + + + + + + TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois + Counter Mode (GCM) + + + + + + + + + + + Digital Signature Standard (DSS) + + NIST + + + + + + + + + Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + + Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Range Requests + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + World Wide Web Consortium +
    ylafon@w3.org
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Caching + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + Akamai +
    mnot@mnot.net
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Authentication + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + + HTTP State Management Mechanism + + + + + +
    + + + + + + TCP Extensions for High Performance + + + + + + + + + + + + Transport Layer Security Protocol Compression Methods + + + + + + + + + Additional HTTP Status Codes + + + + + + + + + + + Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS) + + + + + + + + + + + + + + + AES Galois Counter Mode (GCM) Cipher Suites for TLS + + + + + + + + + + + + HTML5 + + + + + + + + + + + Latest version available at + . + + + + + + + Talking to Yourself for Fun and Profit + + + + + + + + + + + + + + BREACH: Reviving the CRIME Attack + + + + + + + + + + + Registration Procedures for Message Header Fields + + Nine by Nine +
    GK-IETF@ninebynine.org
    +
    + + BEA Systems +
    mnot@pobox.com
    +
    + + HP Labs +
    JeffMogul@acm.org
    +
    + +
    + + +
    + + + + Recommendations for Secure Use of TLS and DTLS + + + + + + + + + + + + + + + + + + HTTP Alternative Services + + + Akamai + + + Mozilla + + + greenbytes + + + + + + +
    + +
    + + This section is to be removed by RFC Editor before publication. + + +
    + + Renamed Not Authoritative status code to Misdirected Request. + +
    + +
    + + Pseudo-header fields are now required to appear strictly before regular ones. + + + Restored 1xx series status codes, except 101. + + + Changed frame length field 24-bits. Expanded frame header to 9 octets. Added a setting + to limit the damage. + + + Added a setting to advise peers of header set size limits. + + + Removed segments. + + + Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping. + +
    + +
    + + Restored extensibility options. + + + Restricting TLS cipher suites to AEAD only. + + + Removing Content-Encoding requirements. + + + Permitting the use of PRIORITY after stream close. + + + Removed ALTSVC frame. + + + Removed BLOCKED frame. + + + Reducing the maximum padding size to 256 octets; removing padding from + CONTINUATION frames. + + + Removed per-frame GZIP compression. + +
    + +
    + + Added BLOCKED frame (at risk). + + + Simplified priority scheme. + + + Added DATA per-frame GZIP compression. + +
    + +
    + + Changed "connection header" to "connection preface" to avoid confusion. + + + Added dependency-based stream prioritization. + + + Added "h2c" identifier to distinguish between cleartext and secured HTTP/2. + + + Adding missing padding to PUSH_PROMISE. + + + Integrate ALTSVC frame and supporting text. + + + Dropping requirement on "deflate" Content-Encoding. + + + Improving security considerations around use of compression. + +
    + +
    + + Adding padding for data frames. + + + Renumbering frame types, error codes, and settings. + + + Adding INADEQUATE_SECURITY error code. + + + Updating TLS usage requirements to 1.2; forbidding TLS compression. + + + Removing extensibility for frames and settings. + + + Changing setting identifier size. + + + Removing the ability to disable flow control. + + + Changing the protocol identification token to "h2". + + + Changing the use of :authority to make it optional and to allow userinfo in non-HTTP + cases. + + + Allowing split on 0x0 for Cookie. + + + Reserved PRI method in HTTP/1.1 to avoid possible future collisions. + +
    + +
    + + Added cookie crumbling for more efficient header compression. + + + Added header field ordering with the value-concatenation mechanism. + +
    + +
    + + Marked draft for implementation. + +
    + +
    + + Adding definition for CONNECT method. + + + Constraining the use of push to safe, cacheable methods with no request body. + + + Changing from :host to :authority to remove any potential confusion. + + + Adding setting for header compression table size. + + + Adding settings acknowledgement. + + + Removing unnecessary and potentially problematic flags from CONTINUATION. + + + Added denial of service considerations. + +
    +
    + + Marking the draft ready for implementation. + + + Renumbering END_PUSH_PROMISE flag. + + + Editorial clarifications and changes. + +
    + +
    + + Added CONTINUATION frame for HEADERS and PUSH_PROMISE. + + + PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is + zero. + + + Push expanded to allow all safe methods without a request body. + + + Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1 + hop-by-hop header fields. + + + Requiring that intermediaries not forward requests with missing or illegal routing + :-headers. + + + Clarified requirements around handling different frames after stream close, stream reset + and GOAWAY. + + + Added more specific prohibitions for sending of different frame types in various stream + states. + + + Making the last received setting value the effective value. + + + Clarified requirements on TLS version, extension and ciphers. + +
    + +
    + + Committed major restructuring atrocities. + + + Added reference to first header compression draft. + + + Added more formal description of frame lifecycle. + + + Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA. + + + Removed HEADERS+PRIORITY, added optional priority to HEADERS frame. + + + Added PRIORITY frame. + +
    + +
    + + Added continuations to frames carrying header blocks. + + + Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful + concepts, like cookies. + + + Removed "message". + + + Switched to TLS ALPN from NPN. + + + Editorial changes. + +
    + +
    + + Added IANA considerations section for frame types, error codes and settings. + + + Removed data frame compression. + + + Added PUSH_PROMISE. + + + Added globally applicable flags to framing. + + + Removed zlib-based header compression mechanism. + + + Updated references. + + + Clarified stream identifier reuse. + + + Removed CREDENTIALS frame and associated mechanisms. + + + Added advice against naive implementation of flow control. + + + Added session header section. + + + Restructured frame header. Removed distinction between data and control frames. + + + Altered flow control properties to include session-level limits. + + + Added note on cacheability of pushed resources and multiple tenant servers. + + + Changed protocol label form based on discussions. + +
    + +
    + + Changed title throughout. + + + Removed section on Incompatibilities with SPDY draft#2. + + + Changed INTERNAL_ERROR on GOAWAY to have a value of 2 . + + + Replaced abstract and introduction. + + + Added section on starting HTTP/2.0, including upgrade mechanism. + + + Removed unused references. + + + Added flow control principles based on . + +
    + +
    + + Adopted as base for draft-ietf-httpbis-http2. + + + Updated authors/editors list. + + + Added status note. + +
    +
    + +
    +
    + diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 2c5e7acdf21..2b1f3a44ef8 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -2,67 +2,253 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Transport code. + package http2 import ( "bufio" "bytes" + "compress/gzip" "crypto/tls" "errors" "fmt" "io" + "io/ioutil" "log" "net" "net/http" + "sort" "strconv" "strings" "sync" + "time" "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" ) // Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. type Transport struct { - // TODO: remove this and make more general with a TLS dial hook, like http - InsecureTLSDial bool + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. + AllowHTTP bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allow. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an ulimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). 
+ t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. +// It requires Go 1.6 or later and returns an error if the net/http package is too old +// or if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + return err +} - connMu sync.Mutex - conns map[string][]*clientConn // key is host:port +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef } -type clientConn struct { - t *Transport - tconn *tls.Conn - tlsState *tls.ConnectionState - connKey []string // key(s) this connection is cached in, in t.conns +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. +type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + singleUse bool // whether being used for a single http.Request + // readLoop goroutine fields: readerDone chan struct{} // closed on error readerErr error // set before readerDone is closed - hdec *hpack.Decoder - nextRes *http.Response - mu sync.Mutex + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control closed bool - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - streams map[uint32]*clientStream + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated nextStreamID uint32 bw *bufio.Writer - werr error // first write error that has occurred br *bufio.Reader fr *Framer + lastActive time.Time + // Settings from peer: maxFrameSize uint32 maxConcurrentStreams uint32 initialWindowSize uint32 hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred } +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. 
type clientStream struct { - ID uint32 - resc chan resAndError - pw *io.PipeWriter - pr *io.PipeReader + cc *ClientConn + req *http.Request + trace *clientTrace // or nil + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + requestedGzip bool + on100 func() // optional code to run if get a 100 continue response + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel runs in its own goroutine and waits for the user +// to cancel a RoundTrip request, its context to expire, or for the +// request to be done (any way it might be removed from the cc.streams +// map: peer reset, successful completion, TCP connection breakage, +// etc) +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + ctx := reqContext(req) + if req.Cancel == nil && ctx.Done() == nil { + return + } + select { + case <-req.Cancel: + cs.bufPipe.CloseWithError(errRequestCanceled) + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + case <-ctx.Done(): + cs.bufPipe.CloseWithError(ctx.Err()) + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + case <-cs.done: + } +} + +// checkResetOrDone reports any error sent in a RST_STREAM frame by the +// server, or errStreamClosed if the stream is complete. +func (cs *clientStream) checkResetOrDone() error { + select { + case <-cs.peerReset: + return cs.resetErr + case <-cs.done: + return errStreamClosed + default: + return nil + } +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() } type stickyErrWriter struct { @@ -79,27 +265,54 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { return } +var ErrNoCachedConn = errors.New("http2: no cached connection was available") + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. + OnlyCachedConn bool +} + func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - if req.URL.Scheme != "https" { - return nil, errors.New("http2: unsupported scheme") + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. 
+func authorityAddr(scheme string, authority string) (addr string) { + if _, _, err := net.SplitHostPort(authority); err == nil { + return authority + } + port := "443" + if scheme == "http" { + port = "80" } + return net.JoinHostPort(authority, port) +} - host, port, err := net.SplitHostPort(req.URL.Host) - if err != nil { - host = req.URL.Host - port = "443" +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + return nil, errors.New("http2: unsupported scheme") } + addr := authorityAddr(req.URL.Scheme, req.URL.Host) for { - cc, err := t.getClientConn(host, port) + cc, err := t.connPool().GetClientConn(req, addr) if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - res, err := cc.roundTrip(req) - if shouldRetryRequest(err) { // TODO: or clientconn is overloaded (too many outstanding requests)? + traceGotConn(req, cc) + res, err := cc.RoundTrip(req) + if shouldRetryRequest(req, err) { continue } if err != nil { + t.vlogf("RoundTrip failure: %v", err) return nil, err } return res, nil @@ -110,141 +323,103 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { // connected from previous requests but are now sitting idle. // It does not interrupt any connections currently in use. func (t *Transport) CloseIdleConnections() { - t.connMu.Lock() - defer t.connMu.Unlock() - for _, vv := range t.conns { - for _, cc := range vv { - cc.closeIfIdle() - } + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { + cp.closeIdleConnections() } } -var errClientConnClosed = errors.New("http2: client conn is closed") - -func shouldRetryRequest(err error) bool { - // TODO: or GOAWAY graceful shutdown stuff - return err == errClientConnClosed -} - -func (t *Transport) removeClientConn(cc *clientConn) { - t.connMu.Lock() - defer t.connMu.Unlock() - for _, key := range cc.connKey { - vv, ok := t.conns[key] - if !ok { - continue - } - newList := filterOutClientConn(vv, cc) - if len(newList) > 0 { - t.conns[key] = newList - } else { - delete(t.conns, key) - } - } -} +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") +) -func filterOutClientConn(in []*clientConn, exclude *clientConn) []*clientConn { - out := in[:0] - for _, v := range in { - if v != exclude { - out = append(out, v) - } - } - return out +func shouldRetryRequest(req *http.Request, err error) bool { + // TODO: retry GET requests (no bodies) more aggressively, if shutdown + // before response. + return err == errClientConnUnusable } -// AddIdleConn adds c as an idle conn for Transport. -// It assumes that c has not yet exchanged SETTINGS frames. -// The addr maybe be either "host" or "host:port". 
-func (t *Transport) AddIdleConn(addr string, c *tls.Conn) error { - var key string +func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { host, _, err := net.SplitHostPort(addr) - if err == nil { - key = addr - } else { - host = addr - key = addr + ":443" + if err != nil { + return nil, err } - cc, err := t.newClientConn(host, key, c) + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) if err != nil { - return err + return nil, err } - - t.addConn(key, cc) - return nil + return t.NewClientConn(tconn) } -func (t *Transport) addConn(key string, cc *clientConn) { - t.connMu.Lock() - defer t.connMu.Unlock() - if t.conns == nil { - t.conns = make(map[string][]*clientConn) +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *t.TLSClientConfig } - t.conns[key] = append(t.conns[key], cc) -} - -func (t *Transport) getClientConn(host, port string) (*clientConn, error) { - key := net.JoinHostPort(host, port) - - t.connMu.Lock() - for _, cc := range t.conns[key] { - if cc.canTakeNewRequest() { - t.connMu.Unlock() - return cc, nil - } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) } - t.connMu.Unlock() - - // TODO(bradfitz): use a singleflight.Group to only lock once per 'key'. - // Probably need to vendor it in as github.com/golang/sync/singleflight - // though, since the net package already uses it? Also lines up with - // sameer, bcmills, et al wanting to open source some sync stuff. - cc, err := t.dialClientConn(host, port, key) - if err != nil { - return nil, err + if cfg.ServerName == "" { + cfg.ServerName = host } - t.addConn(key, cc) - return cc, nil + return cfg } -func (t *Transport) dialClientConn(host, port, key string) (*clientConn, error) { - cfg := &tls.Config{ - ServerName: host, - NextProtos: []string{NextProtoTLS}, - InsecureSkipVerify: t.InsecureTLSDial, +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS } - tconn, err := tls.Dial("tcp", net.JoinHostPort(host, port), cfg) + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) if err != nil { return nil, err } - return t.newClientConn(host, key, tconn) -} - -func (t *Transport) newClientConn(host, key string, tconn *tls.Conn) (*clientConn, error) { - if err := tconn.Handshake(); err != nil { + if err := cn.Handshake(); err != nil { return nil, err } - if !t.InsecureTLSDial { - if err := tconn.VerifyHostname(host); err != nil { + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { return nil, err } } - state := tconn.ConnectionState() + state := cn.ConnectionState() if p := state.NegotiatedProtocol; p != NextProtoTLS { return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) } if !state.NegotiatedProtocolIsMutual { return nil, errors.New("http2: could not negotiate protocol mutually") } - if _, err := tconn.Write(clientPreface); err != nil { + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. 
+func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return transportExpectContinueTimeout(t.t1) +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + if VerboseLogs { + t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr()) + } + if _, err := c.Write(clientPreface); err != nil { + t.vlogf("client preface write error: %v", err) return nil, err } - cc := &clientConn{ + cc := &ClientConn{ t: t, - tconn: tconn, - connKey: []string{key}, // TODO: cert's validated hostnames too - tlsState: &state, + tconn: c, readerDone: make(chan struct{}), nextStreamID: 1, maxFrameSize: 16 << 10, // spec default @@ -252,14 +427,36 @@ func (t *Transport) newClientConn(host, key string, tconn *tls.Conn) (*clientCon maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. streams: make(map[uint32]*clientStream), } - cc.bw = bufio.NewWriter(stickyErrWriter{tconn, &cc.werr}) - cc.br = bufio.NewReader(tconn) + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.fr.WriteSettings() - // TODO: re-send more conn-level flow control tokens when server uses all these. - cc.fr.WriteWindowUpdate(0, 1<<30) // um, 0x7fffffff doesn't work to Google? it hangs? + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) cc.bw.Flush() if cc.werr != nil { return nil, cc.werr @@ -286,33 +483,48 @@ func (t *Transport) newClientConn(host, key string, tconn *tls.Conn) (*clientCon case SettingInitialWindowSize: cc.initialWindowSize = s.Val default: - // TODO(bradfitz): handle more + // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE? t.vlogf("Unhandled Setting: %v", s) } return nil }) - // TODO: figure out henc size - cc.hdec = hpack.NewDecoder(initialHeaderTableSize, cc.onNewHeaderField) go cc.readLoop() return cc, nil } -func (cc *clientConn) setGoAway(f *GoAwayFrame) { +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() + + old := cc.goAway cc.goAway = f + + // Merge the previous and current GoAway error frames. 
+ if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } } -func (cc *clientConn) canTakeNewRequest() bool { +func (cc *ClientConn) CanTakeNewRequest() bool { cc.mu.Lock() defer cc.mu.Unlock() - return cc.goAway == nil && + return cc.canTakeNewRequestLocked() +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + if cc.singleUse && cc.nextStreamID > 1 { + return false + } + return cc.goAway == nil && !cc.closed && int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && cc.nextStreamID < 2147483647 } -func (cc *clientConn) closeIfIdle() { +func (cc *ClientConn) closeIfIdle() { cc.mu.Lock() if len(cc.streams) > 0 { cc.mu.Unlock() @@ -325,95 +537,588 @@ func (cc *clientConn) closeIfIdle() { cc.tconn.Close() } -func (cc *clientConn) roundTrip(req *http.Request) (*http.Response, error) { +const maxAllocFrameSize = 512 << 10 + +// frameBuffer returns a scratch buffer suitable for writing DATA frames. +// They're capped at the min of the peer's max frame size or 512KB +// (kinda arbitrarily), but definitely capped so we don't allocate 4GB +// bufers. +func (cc *ClientConn) frameScratchBuffer() []byte { + cc.mu.Lock() + size := cc.maxFrameSize + if size > maxAllocFrameSize { + size = maxAllocFrameSize + } + for i, buf := range cc.freeBuf { + if len(buf) >= int(size) { + cc.freeBuf[i] = nil + cc.mu.Unlock() + return buf[:size] + } + } + cc.mu.Unlock() + return make([]byte, size) +} + +func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { + cc.mu.Lock() + defer cc.mu.Unlock() + const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. + if len(cc.freeBuf) < maxBufs { + cc.freeBuf = append(cc.freeBuf, buf) + return + } + for i, old := range cc.freeBuf { + if old == nil { + cc.freeBuf[i] = buf + return + } + } + // forget about it. +} + +// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. +var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + // TODO: could do better allocation-wise here, but trailers are rare, + // so being lazy for now. + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. 
+func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return errors.New("http2: invalid Upgrade request header") + } + if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 { + return errors.New("http2: invalid Transfer-Encoding request header") + } + if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 { + return errors.New("http2: invalid Connection request header") + } + return nil +} + +func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) { + body = req.Body + if body == nil { + return nil, 0 + } + if req.ContentLength != 0 { + return req.Body, req.ContentLength + } + + // We have a body but a zero content length. Test to see if + // it's actually zero or just unset. + var buf [1]byte + n, rerr := io.ReadFull(body, buf[:]) + if rerr != nil && rerr != io.EOF { + return errorReader{rerr}, -1 + } + if n == 1 { + // Oh, guess there is data in this Body Reader after all. + // The ContentLength field just wasn't set. + // Stich the Body back together again, re-attaching our + // consumed byte. + return io.MultiReader(bytes.NewReader(buf[:]), body), -1 + } + // Body is actually zero bytes. + return nil, 0 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := checkConnHeaders(req); err != nil { + return nil, err + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, err + } + hasTrailers := trailers != "" + + body, contentLen := bodyAndLength(req) + hasBody := body != nil + cc.mu.Lock() + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + cc.mu.Unlock() + return nil, errClientConnUnusable + } + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + requestedGzip = true + } - if cc.closed { + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { cc.mu.Unlock() - return nil, errClientConnClosed + return nil, err } cs := cc.newStream() - hasBody := false // TODO + cs.req = req + cs.trace = requestTrace(req) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() - // we send: HEADERS[+CONTINUATION] + (DATA?) 
- hdrs := cc.encodeHeaders(req) - first := true - for len(hdrs) > 0 { + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := reqContext(req) + + handleReadLoopResponse := func(re resAndError) (*http.Response, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errTimeout + case <-ctx.Done(): + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, ctx.Err() + case <-req.Cancel: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.resetErr + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. 
+ select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + return nil, err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + frameSize := int(cc.maxFrameSize) + for len(hdrs) > 0 && cc.werr == nil { chunk := hdrs - if len(chunk) > int(cc.maxFrameSize) { - chunk = chunk[:cc.maxFrameSize] + if len(chunk) > frameSize { + chunk = chunk[:frameSize] } hdrs = hdrs[len(chunk):] endHeaders := len(hdrs) == 0 if first { cc.fr.WriteHeaders(HeadersFrameParam{ - StreamID: cs.ID, + StreamID: streamID, BlockFragment: chunk, - EndStream: !hasBody, + EndStream: endStream, EndHeaders: endHeaders, }) first = false } else { - cc.fr.WriteContinuation(cs.ID, endHeaders, chunk) + cc.fr.WriteContinuation(streamID, endHeaders, chunk) } } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. cc.bw.Flush() - werr := cc.werr - cc.mu.Unlock() + return cc.werr +} - if hasBody { - // TODO: write data. and it should probably be interleaved: - // go ... io.Copy(dataFrameWriter{cc, cs, ...}, req.Body) ... etc +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. + errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { + cc := cs.cc + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + buf := cc.frameScratchBuffer() + defer cc.putFrameScratchBuffer(buf) + + defer func() { + traceWroteRequest(cs.trace, err) + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cerr := bodyCloser.Close() + if err == nil { + err = cerr + } + }() + + req := cs.req + hasTrailers := req.Trailer != nil + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf) + if err == io.EOF { + sawEOF = true + err = nil + } else if err != nil { + return err + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = cc.fr.WriteData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or opt-out? + // Use some heuristic on the body type? Nagel-like timers? + // Based on 'n'? Only last chunk of this for loop, unless flow control + // tokens are low? 
For now, always: + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } } - if werr != nil { - return nil, werr + cc.wmu.Lock() + if !sentEnd { + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls = cc.encodeTrailers(req) + cc.mu.Unlock() + } + + // Avoid forgetting to send an END_STREAM if the encoded + // trailers are 0 bytes. Both results produce and END_STREAM. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + cc.wmu.Unlock() + + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { - re := <-cs.resc - if re.err != nil { - return nil, re.err + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() } - res := re.res - res.Request = req - res.TLS = cc.tlsState - return res, nil } +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + // requires cc.mu be held. -func (cc *clientConn) encodeHeaders(req *http.Request) []byte { +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() - // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go host := req.Host if host == "" { host = req.URL.Host } - path := req.URL.Path - if path == "" { - path = "/" + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httplex.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } } - cc.writeHeader(":authority", host) // probably not right for all sites + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + cc.writeHeader(":authority", host) cc.writeHeader(":method", req.Method) - cc.writeHeader(":path", path) - cc.writeHeader(":scheme", "https") + if req.Method != "CONNECT" { + cc.writeHeader(":path", req.URL.RequestURI()) + cc.writeHeader(":scheme", "https") + } + if trailers != "" { + cc.writeHeader("trailer", trailers) + } + var didUA bool for k, vv := range req.Header { lowKey := strings.ToLower(k) - if lowKey == "host" { + switch lowKey { + case "host", "content-length": + // Host is :authority, already sent. 
+ // Content-Length is automatic, set below. + continue + case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. continue + case "user-agent": + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } } for _, v := range vv { cc.writeHeader(lowKey, v) } } + if shouldSendReqContentLength(req.Method, contentLength) { + cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + cc.writeHeader("accept-encoding", "gzip") + } + if !didUA { + cc.writeHeader("user-agent", defaultUserAgent) + } + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { + cc.hbuf.Reset() + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filter at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } return cc.hbuf.Bytes() } -func (cc *clientConn) writeHeader(name, value string) { +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) } @@ -423,173 +1128,621 @@ type resAndError struct { } // requires cc.mu be held. -func (cc *clientConn) newStream() *clientStream { +func (cc *ClientConn) newStream() *clientStream { cs := &clientStream{ - ID: cc.nextStreamID, - resc: make(chan resAndError, 1), + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) cc.nextStreamID += 2 cc.streams[cs.ID] = cs return cs } -func (cc *clientConn) streamByID(id uint32, andRemove bool) *clientStream { +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { cc.mu.Lock() defer cc.mu.Unlock() cs := cc.streams[id] - if andRemove { + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() delete(cc.streams, id) + close(cs.done) + cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl } return cs } -// runs in its own goroutine. 
-func (cc *clientConn) readLoop() { - defer cc.t.removeClientConn(cc) +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + activeRes map[uint32]*clientStream // keyed by streamID + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{ + cc: cc, + activeRes: make(map[uint32]*clientStream), + } + + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) defer close(cc.readerDone) - activeRes := map[uint32]*clientStream{} // keyed by streamID // Close any response bodies if the server closes prematurely. // TODO: also do this if we've written the headers but not // gotten a response yet. - defer func() { - err := cc.readerErr - if err == io.EOF { + err := cc.readerErr + cc.mu.Lock() + if err == io.EOF { + if cc.goAway != nil { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else { err = io.ErrUnexpectedEOF } - for _, cs := range activeRes { - cs.pw.CloseWithError(err) + } + for _, cs := range rl.activeRes { + cs.bufPipe.CloseWithError(err) + } + for _, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: } - }() - - // continueStreamID is the stream ID we're waiting for - // continuation frames for. - var continueStreamID uint32 + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a reply for { f, err := cc.fr.ReadFrame() if err != nil { - cc.readerErr = err - return + cc.vlogf("Transport readFrame error: (%T) %v", err, err) } - cc.vlogf("Transport received %v: %#v", f.Header(), f) - - streamID := f.Header().StreamID - - _, isContinue := f.(*ContinuationFrame) - if isContinue { - if streamID != continueStreamID { - cc.logf("Protocol violation: got CONTINUATION with id %d; want %d", streamID, continueStreamID) - cc.readerErr = ConnectionError(ErrCodeProtocol) - return + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { + rl.endStreamError(cs, cc.fr.errDetail) } - } else if continueStreamID != 0 { - // Continue frames need to be adjacent in the stream - // and we were in the middle of headers. - cc.logf("Protocol violation: got %T for stream %d, want CONTINUATION for %d", f, streamID, continueStreamID) - cc.readerErr = ConnectionError(ErrCodeProtocol) - return - } - - if streamID%2 == 0 { - // Ignore streams pushed from the server for now. - // These always have an even stream id. 
continue + } else if err != nil { + return err } - streamEnded := false - if ff, ok := f.(streamEnder); ok { - streamEnded = ff.StreamEnded() - } - - cs := cc.streamByID(streamID, streamEnded) - if cs == nil { - cc.logf("Received frame for untracked stream ID %d", streamID) - continue + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) } + maybeIdle := false // whether frame might transition us to idle switch f := f.(type) { - case *HeadersFrame: - cc.nextRes = &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: make(http.Header), - } - cs.pr, cs.pw = io.Pipe() - cc.hdec.Write(f.HeaderBlockFragment()) - case *ContinuationFrame: - cc.hdec.Write(f.HeaderBlockFragment()) + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true case *DataFrame: - if VerboseLogs { - cc.logf("DATA: %q", f.Data()) - } - cs.pw.Write(f.Data()) + err = rl.processData(f) + maybeIdle = true case *GoAwayFrame: - cc.t.removeClientConn(cc) - if f.ErrCode != 0 { - // TODO: deal with GOAWAY more. particularly the error code - cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) - } - cc.setGoAway(f) + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) default: cc.logf("Transport: unhandled response frame type %T", f) } - headersEnded := false - if he, ok := f.(headersEnder); ok { - headersEnded = he.HeadersEnded() - if headersEnded { - continueStreamID = 0 - } else { - continueStreamID = streamID - } + if err != nil { + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + cc.closeIfIdle() } + } +} - if streamEnded { - cs.pw.Close() - delete(activeRes, streamID) +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) } - if headersEnded { - if cs == nil { - panic("couldn't find stream") // TODO be graceful + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + if res.Body != noBody { + rl.activeRes[cs.ID] = cs + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. 
+// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t } - // TODO: set the Body to one which notes the - // Close and also sends the server a - // RST_STREAM - cc.nextRes.Body = cs.pr - res := cc.nextRes - activeRes[streamID] = cs - cs.resc <- resAndError{res: res} + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. } } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage + cs.bufPipe = pipe{b: buf} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + setResponseUncompressed(res) + } + return res, nil } -func (cc *clientConn) onNewHeaderField(f hpack.HeaderField) { - // TODO: verifiy pseudo headers come before non-pseudo headers - // TODO: verifiy the status is set - if VerboseLogs { - cc.logf("Header field: %+v", f) +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) } - if f.Name == ":status" { - code, err := strconv.Atoi(f.Value) - if err != nil { - panic("TODO: be graceful") + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. 
+ return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. +type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err } - cc.nextRes.Status = f.Value + " " + http.StatusText(code) - cc.nextRes.StatusCode = code - return } - if strings.HasPrefix(f.Name, ":") { - // "Endpoints MUST NOT generate pseudo-header fields other than those defined in this document." - // TODO: treat as invalid? + if n == 0 { + // No flow control tokens to send back. return } - cc.nextRes.Header.Add(http.CanonicalHeaderKey(f.Name), f.Value) + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. + v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return } -func (cc *clientConn) logf(format string, args ...interface{}) { +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + if cs.bufPipe.Err() != io.EOF { + // TODO: write test for this + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } + cs.bufPipe.BreakWithError(errClosedResponseBody) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? 
Tough without accumulating too much state. + return nil + } + if data := f.Data(); len(data) > 0 { + if cs.bufPipe.b == nil { + // Data frame after it's already closed? + cc.logf("http2: Transport received DATA frame for closed stream; closing connection") + return ConnectionError(ErrCodeProtocol) + } + + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(len(data)) { + cs.inflow.take(int32(len(data))) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + cc.mu.Unlock() + + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + cs.bufPipe.closeWithErrorAndCode(err, code) + delete(rl.activeRes, cs.ID) + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingInitialWindowSize: + // TODO: error if this is too large. + + // TODO: adjust flow control of still-open + // frames by the difference of the old initial + // window size and this one. + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. + cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.cc.streamByID(f.StreamID, true) + if cs == nil { + // TODO: return error if server tries to RST_STEAM an idle stream + return nil + } + select { + case <-cs.peerReset: + // Already reset. + // This is the only goroutine + // which closes this, so there + // isn't a race. 
+ default: + err := StreamError{cs.ID, f.ErrCode} + cs.resetErr = err + close(cs.peerReset) + cs.bufPipe.CloseWithError(err) + cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + } + delete(rl.activeRes, cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. + // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: do something with err? send it as a debug frame to the peer? + // But that's only in GOAWAY. Invent a new frame type? Is there one already? + cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { cc.t.logf(format, args...) } -func (cc *clientConn) vlogf(format string, args ...interface{}) { +func (cc *ClientConn) vlogf(format string, args ...interface{}) { cc.t.vlogf(format, args...) } @@ -602,3 +1755,130 @@ func (t *Transport) vlogf(format string, args ...interface{}) { func (t *Transport) logf(format string, args ...interface{}) { log.Printf(format, args...) } + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". 
+type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httplex.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. + const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + s.timer.Stop() + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. + return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. + go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. +func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") +} diff --git a/vendor/golang.org/x/net/http2/transport_test.go b/vendor/golang.org/x/net/http2/transport_test.go new file mode 100644 index 00000000000..5887714896b --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport_test.go @@ -0,0 +1,2099 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2/hpack" +) + +var ( + extNet = flag.Bool("extnet", false, "do external network tests") + transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport") + insecure = flag.Bool("insecure", false, "insecure TLS dials") // TODO: dead code. remove? 
+) + +var tlsConfigInsecure = &tls.Config{InsecureSkipVerify: true} + +func TestTransportExternal(t *testing.T) { + if !*extNet { + t.Skip("skipping external network test") + } + req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil) + rt := &Transport{TLSClientConfig: tlsConfigInsecure} + res, err := rt.RoundTrip(req) + if err != nil { + t.Fatalf("%v", err) + } + res.Write(os.Stdout) +} + +func startH2cServer(t *testing.T) net.Listener { + h2Server := &Server{} + l := newLocalListener(t) + go func() { + conn, err := l.Accept() + if err != nil { + t.Error(err) + return + } + h2Server.ServeConn(conn, &ServeConnOpts{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, %v", r.URL.Path) + })}) + }() + return l +} + +func TestTransportH2c(t *testing.T) { + l := startH2cServer(t) + defer l.Close() + req, err := http.NewRequest("GET", "http://"+l.Addr().String()+"/foobar", nil) + if err != nil { + t.Fatal(err) + } + tr := &Transport{ + AllowHTTP: true, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + return net.Dial(network, addr) + }, + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if res.ProtoMajor != 2 { + t.Fatal("proto not h2c") + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(body), "Hello, /foobar"; got != want { + t.Fatalf("response got %v, want %v", got, want) + } +} + +func TestTransport(t *testing.T) { + const body = "sup" + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, body) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + t.Logf("Got res: %+v", res) + if g, w := res.StatusCode, 200; g != w { + t.Errorf("StatusCode = %v; want %v", g, w) + } + if g, w := res.Status, "200 OK"; g != w { + t.Errorf("Status = %q; want %q", g, w) + } + wantHeader := http.Header{ + "Content-Length": []string{"3"}, + "Content-Type": []string{"text/plain; charset=utf-8"}, + "Date": []string{"XXX"}, // see cleanDate + } + cleanDate(res) + if !reflect.DeepEqual(res.Header, wantHeader) { + t.Errorf("res Header = %v; want %v", res.Header, wantHeader) + } + if res.Request != req { + t.Errorf("Response.Request = %p; want %p", res.Request, req) + } + if res.TLS == nil { + t.Error("Response.TLS = nil; want non-nil") + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("Body read: %v", err) + } else if string(slurp) != body { + t.Errorf("Body = %q; want %q", slurp, body) + } +} + +func onSameConn(t *testing.T, modReq func(*http.Request)) bool { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.RemoteAddr) + }, optOnlyServer, func(c net.Conn, st http.ConnState) { + t.Logf("conn %v is now state %v", c.RemoteAddr(), st) + }) + defer st.Close() + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + get := func() string { + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + modReq(req) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("Body read: %v", err) + } + addr := 
strings.TrimSpace(string(slurp)) + if addr == "" { + t.Fatalf("didn't get an addr in response") + } + return addr + } + first := get() + second := get() + return first == second +} + +func TestTransportReusesConns(t *testing.T) { + if !onSameConn(t, func(*http.Request) {}) { + t.Errorf("first and second responses were on different connections") + } +} + +func TestTransportReusesConn_RequestClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Close = true }) { + t.Errorf("first and second responses were not on different connections") + } +} + +func TestTransportReusesConn_ConnClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Header.Set("Connection", "close") }) { + t.Errorf("first and second responses were not on different connections") + } +} + +// Tests that the Transport only keeps one pending dial open per destination address. +// https://golang.org/issue/13397 +func TestTransportGroupsPendingDials(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.RemoteAddr) + }, optOnlyServer) + defer st.Close() + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + } + defer tr.CloseIdleConnections() + var ( + mu sync.Mutex + dials = map[string]int{} + ) + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Error(err) + return + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Error(err) + return + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("Body read: %v", err) + } + addr := strings.TrimSpace(string(slurp)) + if addr == "" { + t.Errorf("didn't get an addr in response") + } + mu.Lock() + dials[addr]++ + mu.Unlock() + }() + } + wg.Wait() + if len(dials) != 1 { + t.Errorf("saw %d dials; want 1: %v", len(dials), dials) + } + tr.CloseIdleConnections() + if err := retry(50, 10*time.Millisecond, func() error { + cp, ok := tr.connPool().(*clientConnPool) + if !ok { + return fmt.Errorf("Conn pool is %T; want *clientConnPool", tr.connPool()) + } + cp.mu.Lock() + defer cp.mu.Unlock() + if len(cp.dialing) != 0 { + return fmt.Errorf("dialing map = %v; want empty", cp.dialing) + } + if len(cp.conns) != 0 { + return fmt.Errorf("conns = %v; want empty", cp.conns) + } + if len(cp.keys) != 0 { + return fmt.Errorf("keys = %v; want empty", cp.keys) + } + return nil + }); err != nil { + t.Errorf("State of pool after CloseIdleConnections: %v", err) + } +} + +func retry(tries int, delay time.Duration, fn func() error) error { + var err error + for i := 0; i < tries; i++ { + err = fn() + if err == nil { + return nil + } + time.Sleep(delay) + } + return err +} + +func TestTransportAbortClosesPipes(t *testing.T) { + shutdown := make(chan struct{}) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() + <-shutdown + }, + optOnlyServer, + ) + defer st.Close() + defer close(shutdown) // we must shutdown before st.Close() to avoid hanging + + done := make(chan struct{}) + requestMade := make(chan struct{}) + go func() { + defer close(done) + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + close(requestMade) + _, err = ioutil.ReadAll(res.Body) + if err == nil { + t.Error("expected error from res.Body.Read") + } + }() + + 
<-requestMade + // Now force the serve loop to end, via closing the connection. + st.closeConn() + // deadlock? that's a bug. + select { + case <-done: + case <-time.After(3 * time.Second): + t.Fatal("timeout") + } +} + +// TODO: merge this with TestTransportBody to make TestTransportRequest? This +// could be a table-driven test with extra goodies. +func TestTransportPath(t *testing.T) { + gotc := make(chan *url.URL, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + gotc <- r.URL + }, + optOnlyServer, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + const ( + path = "/testpath" + query = "q=1" + ) + surl := st.ts.URL + path + "?" + query + req, err := http.NewRequest("POST", surl, nil) + if err != nil { + t.Fatal(err) + } + c := &http.Client{Transport: tr} + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + got := <-gotc + if got.Path != path { + t.Errorf("Read Path = %q; want %q", got.Path, path) + } + if got.RawQuery != query { + t.Errorf("Read RawQuery = %q; want %q", got.RawQuery, query) + } +} + +func randString(n int) string { + rnd := rand.New(rand.NewSource(int64(n))) + b := make([]byte, n) + for i := range b { + b[i] = byte(rnd.Intn(256)) + } + return string(b) +} + +func TestTransportBody(t *testing.T) { + bodyTests := []struct { + body string + noContentLen bool + }{ + {body: "some message"}, + {body: "some message", noContentLen: true}, + {body: ""}, + {body: "", noContentLen: true}, + {body: strings.Repeat("a", 1<<20), noContentLen: true}, + {body: strings.Repeat("a", 1<<20)}, + {body: randString(16<<10 - 1)}, + {body: randString(16 << 10)}, + {body: randString(16<<10 + 1)}, + {body: randString(512<<10 - 1)}, + {body: randString(512 << 10)}, + {body: randString(512<<10 + 1)}, + {body: randString(1<<20 - 1)}, + {body: randString(1 << 20)}, + {body: randString(1<<20 + 2)}, + } + + type reqInfo struct { + req *http.Request + slurp []byte + err error + } + gotc := make(chan reqInfo, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + slurp, err := ioutil.ReadAll(r.Body) + if err != nil { + gotc <- reqInfo{err: err} + } else { + gotc <- reqInfo{req: r, slurp: slurp} + } + }, + optOnlyServer, + ) + defer st.Close() + + for i, tt := range bodyTests { + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + var body io.Reader = strings.NewReader(tt.body) + if tt.noContentLen { + body = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods + } + req, err := http.NewRequest("POST", st.ts.URL, body) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + c := &http.Client{Transport: tr} + res, err := c.Do(req) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + defer res.Body.Close() + ri := <-gotc + if ri.err != nil { + t.Errorf("#%d: read error: %v", i, ri.err) + continue + } + if got := string(ri.slurp); got != tt.body { + t.Errorf("#%d: Read body mismatch.\n got: %q (len %d)\nwant: %q (len %d)", i, shortString(got), len(got), shortString(tt.body), len(tt.body)) + } + wantLen := int64(len(tt.body)) + if tt.noContentLen && tt.body != "" { + wantLen = -1 + } + if ri.req.ContentLength != wantLen { + t.Errorf("#%d. 
handler got ContentLength = %v; want %v", i, ri.req.ContentLength, wantLen) + } + } +} + +func shortString(v string) string { + const maxLen = 100 + if len(v) <= maxLen { + return v + } + return fmt.Sprintf("%v[...%d bytes omitted...]%v", v[:maxLen/2], len(v)-maxLen, v[len(v)-maxLen/2:]) +} + +func TestTransportDialTLS(t *testing.T) { + var mu sync.Mutex // guards following + var gotReq, didDial bool + + ts := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + gotReq = true + mu.Unlock() + }, + optOnlyServer, + ) + defer ts.Close() + tr := &Transport{ + DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) { + mu.Lock() + didDial = true + mu.Unlock() + cfg.InsecureSkipVerify = true + c, err := tls.Dial(netw, addr, cfg) + if err != nil { + return nil, err + } + return c, c.Handshake() + }, + } + defer tr.CloseIdleConnections() + client := &http.Client{Transport: tr} + res, err := client.Get(ts.ts.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + mu.Lock() + if !gotReq { + t.Error("didn't get request") + } + if !didDial { + t.Error("didn't use dial hook") + } +} + +func TestConfigureTransport(t *testing.T) { + t1 := &http.Transport{} + err := ConfigureTransport(t1) + if err == errTransportVersion { + t.Skip(err) + } + if err != nil { + t.Fatal(err) + } + if got := fmt.Sprintf("%#v", *t1); !strings.Contains(got, `"h2"`) { + // Laziness, to avoid buildtags. + t.Errorf("stringification of HTTP/1 transport didn't contain \"h2\": %v", got) + } + wantNextProtos := []string{"h2", "http/1.1"} + if t1.TLSClientConfig == nil { + t.Errorf("nil t1.TLSClientConfig") + } else if !reflect.DeepEqual(t1.TLSClientConfig.NextProtos, wantNextProtos) { + t.Errorf("TLSClientConfig.NextProtos = %q; want %q", t1.TLSClientConfig.NextProtos, wantNextProtos) + } + if err := ConfigureTransport(t1); err == nil { + t.Error("unexpected success on second call to ConfigureTransport") + } + + // And does it work? 
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.Proto) + }, optOnlyServer) + defer st.Close() + + t1.TLSClientConfig.InsecureSkipVerify = true + c := &http.Client{Transport: t1} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(slurp), "HTTP/2.0"; got != want { + t.Errorf("body = %q; want %q", got, want) + } +} + +type capitalizeReader struct { + r io.Reader +} + +func (cr capitalizeReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + for i, b := range p[:n] { + if b >= 'a' && b <= 'z' { + p[i] = b - ('a' - 'A') + } + } + return +} + +type flushWriter struct { + w io.Writer +} + +func (fw flushWriter) Write(p []byte) (n int, err error) { + n, err = fw.w.Write(p) + if f, ok := fw.w.(http.Flusher); ok { + f.Flush() + } + return +} + +type clientTester struct { + t *testing.T + tr *Transport + sc, cc net.Conn // server and client conn + fr *Framer // server's framer + client func() error + server func() error +} + +func newClientTester(t *testing.T) *clientTester { + var dialOnce struct { + sync.Mutex + dialed bool + } + ct := &clientTester{ + t: t, + } + ct.tr = &Transport{ + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialOnce.Lock() + defer dialOnce.Unlock() + if dialOnce.dialed { + return nil, errors.New("only one dial allowed in test mode") + } + dialOnce.dialed = true + return ct.cc, nil + }, + } + + ln := newLocalListener(t) + cc, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + + } + sc, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + ln.Close() + ct.cc = cc + ct.sc = sc + ct.fr = NewFramer(sc, sc) + return ct +} + +func newLocalListener(t *testing.T) net.Listener { + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err == nil { + return ln + } + ln, err = net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + return ln +} + +func (ct *clientTester) greet() { + buf := make([]byte, len(ClientPreface)) + _, err := io.ReadFull(ct.sc, buf) + if err != nil { + ct.t.Fatalf("reading client preface: %v", err) + } + f, err := ct.fr.ReadFrame() + if err != nil { + ct.t.Fatalf("Reading client settings frame: %v", err) + } + if sf, ok := f.(*SettingsFrame); !ok { + ct.t.Fatalf("Wanted client settings frame; got %v", f) + _ = sf // stash it away? 
+ } + if err := ct.fr.WriteSettings(); err != nil { + ct.t.Fatal(err) + } + if err := ct.fr.WriteSettingsAck(); err != nil { + ct.t.Fatal(err) + } +} + +func (ct *clientTester) cleanup() { + ct.tr.CloseIdleConnections() +} + +func (ct *clientTester) run() { + errc := make(chan error, 2) + ct.start("client", errc, ct.client) + ct.start("server", errc, ct.server) + defer ct.cleanup() + for i := 0; i < 2; i++ { + if err := <-errc; err != nil { + ct.t.Error(err) + return + } + } +} + +func (ct *clientTester) start(which string, errc chan<- error, fn func() error) { + go func() { + finished := false + var err error + defer func() { + if !finished { + err = fmt.Errorf("%s goroutine didn't finish.", which) + } else if err != nil { + err = fmt.Errorf("%s: %v", which, err) + } + errc <- err + }() + err = fn() + finished = true + }() +} + +type countingReader struct { + n *int64 +} + +func (r countingReader) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(i) + } + atomic.AddInt64(r.n, int64(len(p))) + return len(p), err +} + +func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) } +func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) } + +func testTransportReqBodyAfterResponse(t *testing.T, status int) { + const bodySize = 10 << 20 + ct := newClientTester(t) + ct.client = func() error { + var n int64 // atomic + req, err := http.NewRequest("PUT", "https://dummy.tld/", io.LimitReader(countingReader{&n}, bodySize)) + if err != nil { + return err + } + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != status { + return fmt.Errorf("status code = %v; want %v", res.StatusCode, status) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("Slurp: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("unexpected body: %q", slurp) + } + if status == 200 { + if got := atomic.LoadInt64(&n); got != bodySize { + return fmt.Errorf("For 200 response, Transport wrote %d bytes; want %d", got, bodySize) + } + } else { + if got := atomic.LoadInt64(&n); got == 0 || got >= bodySize { + return fmt.Errorf("For %d response, Transport wrote %d bytes; want (0,%d) exclusive", status, got, bodySize) + } + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + var dataRecv int64 + var closed bool + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + //println(fmt.Sprintf("server got frame: %v", f)) + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + if f.StreamEnded() { + return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f) + } + case *DataFrame: + dataLen := len(f.Data()) + if dataLen > 0 { + if dataRecv == 0 { + enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } + if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { + return err + } + if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { + return err + } + } + dataRecv += int64(dataLen) + + if !closed && ((status != 200 && dataRecv > 0) || + (status == 200 && dataRecv == bodySize)) { + closed = 
true + if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil { + return err + } + return nil + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +// See golang.org/issue/13444 +func TestTransportFullDuplex(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) // redundant but for clarity + w.(http.Flusher).Flush() + io.Copy(flushWriter{w}, capitalizeReader{r.Body}) + fmt.Fprintf(w, "bye.\n") + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + pr, pw := io.Pipe() + req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr)) + if err != nil { + t.Fatal(err) + } + req.ContentLength = -1 + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + t.Fatalf("StatusCode = %v; want %v", res.StatusCode, 200) + } + bs := bufio.NewScanner(res.Body) + want := func(v string) { + if !bs.Scan() { + t.Fatalf("wanted to read %q but Scan() = false, err = %v", v, bs.Err()) + } + } + write := func(v string) { + _, err := io.WriteString(pw, v) + if err != nil { + t.Fatalf("pipe write: %v", err) + } + } + write("foo\n") + want("FOO") + write("bar\n") + want("BAR") + pw.Close() + want("bye.") + if err := bs.Err(); err != nil { + t.Fatal(err) + } +} + +func TestTransportConnectRequest(t *testing.T) { + gotc := make(chan *http.Request, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + gotc <- r + }, optOnlyServer) + defer st.Close() + + u, err := url.Parse(st.ts.URL) + if err != nil { + t.Fatal(err) + } + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + tests := []struct { + req *http.Request + want string + }{ + { + req: &http.Request{ + Method: "CONNECT", + Header: http.Header{}, + URL: u, + }, + want: u.Host, + }, + { + req: &http.Request{ + Method: "CONNECT", + Header: http.Header{}, + URL: u, + Host: "example.com:123", + }, + want: "example.com:123", + }, + } + + for i, tt := range tests { + res, err := c.Do(tt.req) + if err != nil { + t.Errorf("%d. 
RoundTrip = %v", i, err) + continue + } + res.Body.Close() + req := <-gotc + if req.Method != "CONNECT" { + t.Errorf("method = %q; want CONNECT", req.Method) + } + if req.Host != tt.want { + t.Errorf("Host = %q; want %q", req.Host, tt.want) + } + if req.URL.Host != tt.want { + t.Errorf("URL.Host = %q; want %q", req.URL.Host, tt.want) + } + } +} + +type headerType int + +const ( + noHeader headerType = iota // omitted + oneHeader + splitHeader // broken into continuation on purpose +) + +const ( + f0 = noHeader + f1 = oneHeader + f2 = splitHeader + d0 = false + d1 = true +) + +// Test all 36 combinations of response frame orders: +// (3 ways of 100-continue) * (2 ways of headers) * (2 ways of data) * (3 ways of trailers):func TestTransportResponsePattern_00f0(t *testing.T) { testTransportResponsePattern(h0, h1, false, h0) } +// Generated by http://play.golang.org/p/SScqYKJYXd +func TestTransportResPattern_c0h1d0t0(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f0) } +func TestTransportResPattern_c0h1d0t1(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f1) } +func TestTransportResPattern_c0h1d0t2(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f2) } +func TestTransportResPattern_c0h1d1t0(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f0) } +func TestTransportResPattern_c0h1d1t1(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f1) } +func TestTransportResPattern_c0h1d1t2(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f2) } +func TestTransportResPattern_c0h2d0t0(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f0) } +func TestTransportResPattern_c0h2d0t1(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f1) } +func TestTransportResPattern_c0h2d0t2(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f2) } +func TestTransportResPattern_c0h2d1t0(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f0) } +func TestTransportResPattern_c0h2d1t1(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f1) } +func TestTransportResPattern_c0h2d1t2(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f2) } +func TestTransportResPattern_c1h1d0t0(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f0) } +func TestTransportResPattern_c1h1d0t1(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f1) } +func TestTransportResPattern_c1h1d0t2(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f2) } +func TestTransportResPattern_c1h1d1t0(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f0) } +func TestTransportResPattern_c1h1d1t1(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f1) } +func TestTransportResPattern_c1h1d1t2(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f2) } +func TestTransportResPattern_c1h2d0t0(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f0) } +func TestTransportResPattern_c1h2d0t1(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f1) } +func TestTransportResPattern_c1h2d0t2(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f2) } +func TestTransportResPattern_c1h2d1t0(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f0) } +func TestTransportResPattern_c1h2d1t1(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f1) } +func TestTransportResPattern_c1h2d1t2(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f2) } +func TestTransportResPattern_c2h1d0t0(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f0) } +func TestTransportResPattern_c2h1d0t1(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f1) } +func TestTransportResPattern_c2h1d0t2(t *testing.T) { 
testTransportResPattern(t, f2, f1, d0, f2) } +func TestTransportResPattern_c2h1d1t0(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f0) } +func TestTransportResPattern_c2h1d1t1(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f1) } +func TestTransportResPattern_c2h1d1t2(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f2) } +func TestTransportResPattern_c2h2d0t0(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f0) } +func TestTransportResPattern_c2h2d0t1(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f1) } +func TestTransportResPattern_c2h2d0t2(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f2) } +func TestTransportResPattern_c2h2d1t0(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f0) } +func TestTransportResPattern_c2h2d1t1(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f1) } +func TestTransportResPattern_c2h2d1t2(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f2) } + +func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerType, withData bool, trailers headerType) { + const reqBody = "some request body" + const resBody = "some response body" + + if resHeader == noHeader { + // TODO: test 100-continue followed by immediate + // server stream reset, without headers in the middle? + panic("invalid combination") + } + + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody)) + if expect100Continue != noHeader { + req.Header.Set("Expect", "100-continue") + } + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("Slurp: %v", err) + } + wantBody := resBody + if !withData { + wantBody = "" + } + if string(slurp) != wantBody { + return fmt.Errorf("body = %q; want %q", slurp, wantBody) + } + if trailers == noHeader { + if len(res.Trailer) > 0 { + t.Errorf("Trailer = %v; want none", res.Trailer) + } + } else { + want := http.Header{"Some-Trailer": {"some-value"}} + if !reflect.DeepEqual(res.Trailer, want) { + t.Errorf("Trailer = %v; want %v", res.Trailer, want) + } + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + endStream := false + send := func(mode headerType) { + hbf := buf.Bytes() + switch mode { + case oneHeader: + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: true, + EndStream: endStream, + BlockFragment: hbf, + }) + case splitHeader: + if len(hbf) < 2 { + panic("too small") + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: false, + EndStream: endStream, + BlockFragment: hbf[:1], + }) + ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:]) + default: + panic("bogus mode") + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *DataFrame: + if !f.StreamEnded() { + // No need to send flow control tokens. The test request body is tiny. 
+ continue + } + // Response headers (1+ frames; 1 or 2 in this test, but never 0) + { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "x-foo", Value: "blah"}) + enc.WriteField(hpack.HeaderField{Name: "x-bar", Value: "more"}) + if trailers != noHeader { + enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "some-trailer"}) + } + endStream = withData == false && trailers == noHeader + send(resHeader) + } + if withData { + endStream = trailers == noHeader + ct.fr.WriteData(f.StreamID, endStream, []byte(resBody)) + } + if trailers != noHeader { + endStream = true + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"}) + send(trailers) + } + if endStream { + return nil + } + case *HeadersFrame: + if expect100Continue != noHeader { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) + send(expect100Continue) + } + } + } + } + ct.run() +} + +func TestTransportReceiveUndeclaredTrailer(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, nil) + } + if len(slurp) > 0 { + return fmt.Errorf("body = %q; want nothing", slurp) + } + if _, ok := res.Trailer["Some-Trailer"]; !ok { + return fmt.Errorf("expected Some-Trailer") + } + return nil + } + ct.server = func() error { + ct.greet() + + var n int + var hf *HeadersFrame + for hf == nil && n < 10 { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + hf, _ = f.(*HeadersFrame) + n++ + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + // send headers without Trailer header + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + // send trailers + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "I'm an undeclared Trailer!"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + return nil + } + ct.run() +} + +func TestTransportInvalidTrailer_Pseudo1(t *testing.T) { + testTransportInvalidTrailer_Pseudo(t, oneHeader) +} +func TestTransportInvalidTrailer_Pseudo2(t *testing.T) { + testTransportInvalidTrailer_Pseudo(t, splitHeader) +} +func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) { + testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"}) + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + }) +} + +func TestTransportInvalidTrailer_Capital1(t *testing.T) { + testTransportInvalidTrailer_Capital(t, oneHeader) +} +func TestTransportInvalidTrailer_Capital2(t *testing.T) { + testTransportInvalidTrailer_Capital(t, splitHeader) +} +func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) { + testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + 
enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"}) + }) +} +func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) { + testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"}) + }) +} +func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) { + testInvalidTrailer(t, oneHeader, headerFieldValueError("has\nnewline"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"}) + }) +} + +func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != wantErr { + return fmt.Errorf("res.Body ReadAll error = %q, %#v; want %T of %#v", slurp, err, wantErr, wantErr) + } + if len(slurp) > 0 { + return fmt.Errorf("body = %q; want nothing", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + var endStream bool + send := func(mode headerType) { + hbf := buf.Bytes() + switch mode { + case oneHeader: + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: endStream, + BlockFragment: hbf, + }) + case splitHeader: + if len(hbf) < 2 { + panic("too small") + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: false, + EndStream: endStream, + BlockFragment: hbf[:1], + }) + ct.fr.WriteContinuation(f.StreamID, true, hbf[1:]) + default: + panic("bogus mode") + } + } + // Response headers (1+ frames; 1 or 2 in this test, but never 0) + { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "declared"}) + endStream = false + send(oneHeader) + } + // Trailers: + { + endStream = true + buf.Reset() + writeTrailer(enc) + send(trailers) + } + return nil + } + } + } + ct.run() +} + +func TestTransportChecksResponseHeaderListSize(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != errResponseHeaderListSize { + if res != nil { + res.Body.Close() + } + size := int64(0) + for k, vv := range res.Header { + for _, v := range vv { + size += int64(len(k)) + int64(len(v)) + 32 + } + } + return fmt.Errorf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + large := strings.Repeat("a", 1<<10) + for i := 0; i < 5042; i++ { + enc.WriteField(hpack.HeaderField{Name: large, Value: large}) + } + if size, want := buf.Len(), 6329; size != want { + // Note: this number might change if + // our hpack implementation + // changes. 
That's fine. This is + // just a sanity check that our + // response can fit in a single + // header block fragment frame. + return fmt.Errorf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want) + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + return nil + } + } + } + ct.run() +} + +// Test that the the Transport returns a typed error from Response.Body.Read calls +// when the server sends an error. (here we use a panic, since that should generate +// a stream error, but others like cancel should be similar) +func TestTransportBodyReadErrorType(t *testing.T) { + doPanic := make(chan bool, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() // force headers out + <-doPanic + panic("boom") + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + doPanic <- true + buf := make([]byte, 100) + n, err := res.Body.Read(buf) + want := StreamError{StreamID: 0x1, Code: 0x2} + if !reflect.DeepEqual(want, err) { + t.Errorf("Read = %v, %#v; want error %#v", n, err, want) + } +} + +// golang.org/issue/13924 +// This used to fail after many iterations, especially with -race: +// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race +func TestTransportDoubleCloseOnWriteError(t *testing.T) { + var ( + mu sync.Mutex + conn net.Conn // to close if set + ) + + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + defer mu.Unlock() + if conn != nil { + conn.Close() + } + }, + optOnlyServer, + ) + defer st.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + mu.Lock() + defer mu.Unlock() + conn = tc + return tc, nil + }, + } + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + c.Get(st.ts.URL) +} + +// Test that the http1 Transport.DisableKeepAlives option is respected +// and connections are closed as soon as idle. +// See golang.org/issue/14008 +func TestTransportDisableKeepAlives(t *testing.T) { + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "hi") + }, + optOnlyServer, + ) + defer st.Close() + + connClosed := make(chan struct{}) // closed on tls.Conn.Close + tr := &Transport{ + t1: &http.Transport{ + DisableKeepAlives: true, + }, + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + return ¬eCloseConn{Conn: tc, closefn: func() { close(connClosed) }}, nil + }, + } + c := &http.Client{Transport: tr} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + if _, err := ioutil.ReadAll(res.Body); err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + select { + case <-connClosed: + case <-time.After(1 * time.Second): + t.Errorf("timeout") + } + +} + +// Test concurrent requests with Transport.DisableKeepAlives. We can share connections, +// but when things are totally idle, it still needs to close. 
+func TestTransportDisableKeepAlives_Concurrency(t *testing.T) { + const D = 25 * time.Millisecond + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + time.Sleep(D) + io.WriteString(w, "hi") + }, + optOnlyServer, + ) + defer st.Close() + + var dials int32 + var conns sync.WaitGroup + tr := &Transport{ + t1: &http.Transport{ + DisableKeepAlives: true, + }, + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + atomic.AddInt32(&dials, 1) + conns.Add(1) + return ¬eCloseConn{Conn: tc, closefn: func() { conns.Done() }}, nil + }, + } + c := &http.Client{Transport: tr} + var reqs sync.WaitGroup + const N = 20 + for i := 0; i < N; i++ { + reqs.Add(1) + if i == N-1 { + // For the final request, try to make all the + // others close. This isn't verified in the + // count, other than the Log statement, since + // it's so timing dependent. This test is + // really to make sure we don't interrupt a + // valid request. + time.Sleep(D * 2) + } + go func() { + defer reqs.Done() + res, err := c.Get(st.ts.URL) + if err != nil { + t.Error(err) + return + } + if _, err := ioutil.ReadAll(res.Body); err != nil { + t.Error(err) + return + } + res.Body.Close() + }() + } + reqs.Wait() + conns.Wait() + t.Logf("did %d dials, %d requests", atomic.LoadInt32(&dials), N) +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} + +func isTimeout(err error) bool { + switch err := err.(type) { + case nil: + return false + case *url.Error: + return isTimeout(err.Err) + case net.Error: + return err.Timeout() + } + return false +} + +// Test that the http1 Transport.ResponseHeaderTimeout option and cancel is sent. 
+func TestTransportResponseHeaderTimeout_NoBody(t *testing.T) { + testTransportResponseHeaderTimeout(t, false) +} +func TestTransportResponseHeaderTimeout_Body(t *testing.T) { + testTransportResponseHeaderTimeout(t, true) +} + +func testTransportResponseHeaderTimeout(t *testing.T, body bool) { + ct := newClientTester(t) + ct.tr.t1 = &http.Transport{ + ResponseHeaderTimeout: 5 * time.Millisecond, + } + ct.client = func() error { + c := &http.Client{Transport: ct.tr} + var err error + var n int64 + const bodySize = 4 << 20 + if body { + _, err = c.Post("https://dummy.tld/", "text/foo", io.LimitReader(countingReader{&n}, bodySize)) + } else { + _, err = c.Get("https://dummy.tld/") + } + if !isTimeout(err) { + t.Errorf("client expected timeout error; got %#v", err) + } + if body && n != bodySize { + t.Errorf("only read %d bytes of body; want %d", n, bodySize) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + switch f := f.(type) { + case *DataFrame: + dataLen := len(f.Data()) + if dataLen > 0 { + if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { + return err + } + if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { + return err + } + } + case *RSTStreamFrame: + if f.StreamID == 1 && f.ErrCode == ErrCodeCancel { + return nil + } + } + } + } + ct.run() +} + +func TestTransportDisableCompression(t *testing.T) { + const body = "sup" + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + want := http.Header{ + "User-Agent": []string{"Go-http-client/2.0"}, + } + if !reflect.DeepEqual(r.Header, want) { + t.Errorf("request headers = %v; want %v", r.Header, want) + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + t1: &http.Transport{ + DisableCompression: true, + }, + } + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() +} + +// RFC 7540 section 8.1.2.2 +func TestTransportRejectsConnHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + tests := []struct { + key string + value []string + want string + }{ + { + key: "Upgrade", + value: []string{"anything"}, + want: "ERROR: http2: invalid Upgrade request header", + }, + { + key: "Connection", + value: []string{"foo"}, + want: "ERROR: http2: invalid Connection request header", + }, + { + key: "Connection", + value: []string{"close"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Connection", + value: []string{"close", "something-else"}, + want: "ERROR: http2: invalid Connection request header", + }, + { + key: "Connection", + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Proxy-Connection", // just deleted and ignored + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{""}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{"foo"}, + want: "ERROR: http2: invalid 
Transfer-Encoding request header", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked", "other"}, + want: "ERROR: http2: invalid Transfer-Encoding request header", + }, + { + key: "Content-Length", + value: []string{"123"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Keep-Alive", + value: []string{"doop"}, + want: "Accept-Encoding,User-Agent", + }, + } + + for _, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header[tt.key] = tt.value + res, err := tr.RoundTrip(req) + var got string + if err != nil { + got = fmt.Sprintf("ERROR: %v", err) + } else { + got = res.Header.Get("Got-Header") + res.Body.Close() + } + if got != tt.want { + t.Errorf("For key %q, value %q, got = %q; want %q", tt.key, tt.value, got, tt.want) + } + } +} + +// golang.org/issue/14048 +func TestTransportFailsOnInvalidHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tests := [...]struct { + h http.Header + wantErr string + }{ + 0: { + h: http.Header{"with space": {"foo"}}, + wantErr: `invalid HTTP header name "with space"`, + }, + 1: { + h: http.Header{"name": {"Брэд"}}, + wantErr: "", // okay + }, + 2: { + h: http.Header{"имя": {"Brad"}}, + wantErr: `invalid HTTP header name "имя"`, + }, + 3: { + h: http.Header{"foo": {"foo\x01bar"}}, + wantErr: `invalid HTTP header value "foo\x01bar" for header "foo"`, + }, + } + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + for i, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header = tt.h + res, err := tr.RoundTrip(req) + var bad bool + if tt.wantErr == "" { + if err != nil { + bad = true + t.Errorf("case %d: error = %v; want no error", i, err) + } + } else { + if !strings.Contains(fmt.Sprint(err), tt.wantErr) { + bad = true + t.Errorf("case %d: error = %v; want error %q", i, err, tt.wantErr) + } + } + if err == nil { + if bad { + t.Logf("case %d: server got headers %q", i, res.Header.Get("Got-Header")) + } + res.Body.Close() + } + } +} + +// Tests that gzipReader doesn't crash on a second Read call following +// the first Read call's gzip.NewReader returning an error. +func TestGzipReader_DoubleReadCrash(t *testing.T) { + gz := &gzipReader{ + body: ioutil.NopCloser(strings.NewReader("0123456789")), + } + var buf [1]byte + n, err1 := gz.Read(buf[:]) + if n != 0 || !strings.Contains(fmt.Sprint(err1), "invalid header") { + t.Fatalf("Read = %v, %v; want 0, invalid header", n, err1) + } + n, err2 := gz.Read(buf[:]) + if n != 0 || err2 != err1 { + t.Fatalf("second Read = %v, %v; want 0, %v", n, err2, err1) + } +} + +func TestTransportNewTLSConfig(t *testing.T) { + tests := [...]struct { + conf *tls.Config + host string + want *tls.Config + }{ + // Normal case. 
+ 0: { + conf: nil, + host: "foo.com", + want: &tls.Config{ + ServerName: "foo.com", + NextProtos: []string{NextProtoTLS}, + }, + }, + + // User-provided name (bar.com) takes precedence: + 1: { + conf: &tls.Config{ + ServerName: "bar.com", + }, + host: "foo.com", + want: &tls.Config{ + ServerName: "bar.com", + NextProtos: []string{NextProtoTLS}, + }, + }, + + // NextProto is prepended: + 2: { + conf: &tls.Config{ + NextProtos: []string{"foo", "bar"}, + }, + host: "example.com", + want: &tls.Config{ + ServerName: "example.com", + NextProtos: []string{NextProtoTLS, "foo", "bar"}, + }, + }, + + // NextProto is not duplicated: + 3: { + conf: &tls.Config{ + NextProtos: []string{"foo", "bar", NextProtoTLS}, + }, + host: "example.com", + want: &tls.Config{ + ServerName: "example.com", + NextProtos: []string{"foo", "bar", NextProtoTLS}, + }, + }, + } + for i, tt := range tests { + tr := &Transport{TLSClientConfig: tt.conf} + got := tr.newTLSConfig(tt.host) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. got %#v; want %#v", i, got, tt.want) + } + } +} + +// The Google GFE responds to HEAD requests with a HEADERS frame +// without END_STREAM, followed by a 0-length DATA frame with +// END_STREAM. Make sure we don't get confused by that. (We did.) +func TestTransportReadHeadResponse(t *testing.T) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + if res.ContentLength != 123 { + return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("ReadAll: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, // as the GFE does + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(hf.StreamID, true, nil) + + <-clientDone + return nil + } + } + ct.run() +} + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (int, error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +// golang.org/issue/15425: test that a handler closing the request +// body doesn't terminate the stream to the peer. 
(It just stops +// readability from the handler's side, and eventually the client +// runs out of flow control tokens) +func TestTransportHandlerBodyClose(t *testing.T) { + const bodySize = 10 << 20 + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + r.Body.Close() + io.Copy(w, io.LimitReader(neverEnding('A'), bodySize)) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + g0 := runtime.NumGoroutine() + + const numReq = 10 + for i := 0; i < numReq; i++ { + req, err := http.NewRequest("POST", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + if n != bodySize || err != nil { + t.Fatalf("req#%d: Copy = %d, %v; want %d, nil", i, n, err, bodySize) + } + } + tr.CloseIdleConnections() + + gd := runtime.NumGoroutine() - g0 + if gd > numReq/2 { + t.Errorf("appeared to leak goroutines") + } + +} + +// https://golang.org/issue/15930 +func TestTransportFlowControl(t *testing.T) { + const ( + total = 100 << 20 // 100MB + bufLen = 1 << 16 + ) + + var wrote int64 // updated atomically + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + b := make([]byte, bufLen) + for wrote < total { + n, err := w.Write(b) + atomic.AddInt64(&wrote, int64(n)) + if err != nil { + t.Errorf("ResponseWriter.Write error: %v", err) + break + } + w.(http.Flusher).Flush() + } + }, optOnlyServer) + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal("NewRequest error:", err) + } + resp, err := tr.RoundTrip(req) + if err != nil { + t.Fatal("RoundTrip error:", err) + } + defer resp.Body.Close() + + var read int64 + b := make([]byte, bufLen) + for { + n, err := resp.Body.Read(b) + if err == io.EOF { + break + } + if err != nil { + t.Fatal("Read error:", err) + } + read += int64(n) + + const max = transportDefaultStreamFlow + if w := atomic.LoadInt64(&wrote); -max > read-w || read-w > max { + t.Fatalf("Too much data inflight: server wrote %v bytes but client only received %v", w, read) + } + + // Let the server get ahead of the client. + time.Sleep(1 * time.Millisecond) + } +} + +// golang.org/issue/14627 -- if the server sends a GOAWAY frame, make +// the Transport remember it and return it back to users (via +// RoundTrip or request body reads) if needed (e.g. 
if the server +// proceeds to close the TCP connection before the client gets its +// response) +func TestTransportUsesGoAwayDebugError_RoundTrip(t *testing.T) { + testTransportUsesGoAwayDebugError(t, false) +} + +func TestTransportUsesGoAwayDebugError_Body(t *testing.T) { + testTransportUsesGoAwayDebugError(t, true) +} + +func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + + const goAwayErrCode = ErrCodeHTTP11Required // arbitrary + const goAwayDebugData = "some debug data" + + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if failMidBody { + if err != nil { + return fmt.Errorf("unexpected client RoundTrip error: %v", err) + } + _, err = io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + } + want := GoAwayError{ + LastStreamID: 5, + ErrCode: goAwayErrCode, + DebugData: goAwayDebugData, + } + if !reflect.DeepEqual(err, want) { + t.Errorf("RoundTrip error = %T: %#v, want %T (%#T)", err, err, want, want) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + if failMidBody { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } + // Write two GOAWAY frames, to test that the Transport takes + // the interesting parts of both. + ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData)) + ct.fr.WriteGoAway(5, goAwayErrCode, nil) + ct.sc.Close() + <-clientDone + return nil + } + } + ct.run() +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 9050bebb33f..27ef0dd4d72 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -7,10 +7,12 @@ package http2 import ( "bytes" "fmt" + "log" "net/http" "time" "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" ) // writeFramer is implemented by any type that is used to write frames. @@ -23,7 +25,11 @@ type writeFramer interface { // frame writing scheduler (see writeScheduler in writesched.go). // // This interface is implemented by *serverConn. -// TODO: use it from the client code too, once it exists. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. type writeContext interface { Framer() *Framer Flush() error @@ -91,6 +97,16 @@ func (w *writeData) writeFrame(ctx writeContext) error { return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) } +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. 
+type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + func (se StreamError) writeFrame(ctx writeContext) error { return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) } @@ -108,40 +124,48 @@ func (writeSettingsAck) writeFrame(ctx writeContext) error { } // writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames -// for HTTP response headers from a server handler. +// for HTTP response headers or trailers from a server handler. type writeResHeaders struct { streamID uint32 - httpResCode int + httpResCode int // 0 means no ":status" line h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. endStream bool + date string contentType string contentLength string } +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + func (w *writeResHeaders) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: httpCodeString(w.httpResCode)}) - for k, vv := range w.h { - k = lowerHeader(k) - for _, v := range vv { - // TODO: more of "8.1.2.2 Connection-Specific Header Fields" - if k == "transfer-encoding" && v != "trailers" { - continue - } - enc.WriteField(hpack.HeaderField{Name: k, Value: v}) - } + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) } + + encodeHeaders(enc, w.h, w.trailers) + if w.contentType != "" { - enc.WriteField(hpack.HeaderField{Name: "content-type", Value: w.contentType}) + encKV(enc, "content-type", w.contentType) } if w.contentLength != "" { - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: w.contentLength}) + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) } headerBlock := buf.Bytes() - if len(headerBlock) == 0 { + if len(headerBlock) == 0 && w.trailers == nil { panic("unexpected empty hpack") } @@ -187,7 +211,7 @@ type write100ContinueHeadersFrame struct { func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) + encKV(enc, ":status", "100") return ctx.Framer().WriteHeaders(HeadersFrameParam{ StreamID: w.streamID, BlockFragment: buf.Bytes(), @@ -204,3 +228,37 @@ type writeWindowUpdate struct { func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) } + +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. 
+ continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/golang.org/x/net/http2/z_spec_test.go b/vendor/golang.org/x/net/http2/z_spec_test.go new file mode 100644 index 00000000000..0590cfde933 --- /dev/null +++ b/vendor/golang.org/x/net/http2/z_spec_test.go @@ -0,0 +1,356 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "encoding/xml" + "flag" + "fmt" + "io" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "testing" +) + +var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests") + +// The global map of sentence coverage for the http2 spec. +var defaultSpecCoverage specCoverage + +var loadSpecOnce sync.Once + +func loadSpec() { + if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil { + panic(err) + } else { + defaultSpecCoverage = readSpecCov(f) + f.Close() + } +} + +// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not +// "covered" will be included in report outputted by TestSpecCoverage. +func covers(sec, sentences string) { + loadSpecOnce.Do(loadSpec) + defaultSpecCoverage.cover(sec, sentences) +} + +type specPart struct { + section string + sentence string +} + +func (ss specPart) Less(oo specPart) bool { + atoi := func(s string) int { + n, err := strconv.Atoi(s) + if err != nil { + panic(err) + } + return n + } + a := strings.Split(ss.section, ".") + b := strings.Split(oo.section, ".") + for len(a) > 0 { + if len(b) == 0 { + return false + } + x, y := atoi(a[0]), atoi(b[0]) + if x == y { + a, b = a[1:], b[1:] + continue + } + return x < y + } + if len(b) > 0 { + return true + } + return false +} + +type bySpecSection []specPart + +func (a bySpecSection) Len() int { return len(a) } +func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) } +func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type specCoverage struct { + coverage map[specPart]bool + d *xml.Decoder +} + +func joinSection(sec []int) string { + s := fmt.Sprintf("%d", sec[0]) + for _, n := range sec[1:] { + s = fmt.Sprintf("%s.%d", s, n) + } + return s +} + +func (sc specCoverage) readSection(sec []int) { + var ( + buf = new(bytes.Buffer) + sub = 0 + ) + for { + tk, err := sc.d.Token() + if err != nil { + if err == io.EOF { + return + } + panic(err) + } + switch v := tk.(type) { + case xml.StartElement: + if skipElement(v) { + if err := sc.d.Skip(); err != nil { + panic(err) + } + if v.Name.Local == "section" { + sub++ + } + break + } + switch v.Name.Local { + case "section": + sub++ + sc.readSection(append(sec, sub)) + case "xref": + buf.Write(sc.readXRef(v)) + } + case xml.CharData: + if len(sec) == 0 { + break + } + buf.Write(v) + case xml.EndElement: + if v.Name.Local == "section" { + sc.addSentences(joinSection(sec), buf.String()) + return + } + } + } +} + +func (sc specCoverage) readXRef(se xml.StartElement) []byte { + var b []byte + for { + tk, err := sc.d.Token() + if err != nil { + panic(err) + } + switch v := tk.(type) { + case xml.CharData: + if b != nil { + panic("unexpected CharData") + } + b = []byte(string(v)) + case xml.EndElement: + if v.Name.Local != "xref" { + panic("expected ") + } + if b != nil { + return b + } + sig := attrSig(se) + switch sig { + case "target": + return []byte(fmt.Sprintf("[%s]", attrValue(se, 
"target"))) + case "fmt-of,rel,target", "fmt-,,rel,target": + return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel"))) + case "fmt-of,sec,target", "fmt-,,sec,target": + return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target"))) + case "fmt-of,rel,sec,target": + return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel"))) + default: + panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se))) + } + default: + panic(fmt.Sprintf("unexpected tag %q", v)) + } + } +} + +var skipAnchor = map[string]bool{ + "intro": true, + "Overview": true, +} + +var skipTitle = map[string]bool{ + "Acknowledgements": true, + "Change Log": true, + "Document Organization": true, + "Conventions and Terminology": true, +} + +func skipElement(s xml.StartElement) bool { + switch s.Name.Local { + case "artwork": + return true + case "section": + for _, attr := range s.Attr { + switch attr.Name.Local { + case "anchor": + if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") { + return true + } + case "title": + if skipTitle[attr.Value] { + return true + } + } + } + } + return false +} + +func readSpecCov(r io.Reader) specCoverage { + sc := specCoverage{ + coverage: map[specPart]bool{}, + d: xml.NewDecoder(r)} + sc.readSection(nil) + return sc +} + +func (sc specCoverage) addSentences(sec string, sentence string) { + for _, s := range parseSentences(sentence) { + sc.coverage[specPart{sec, s}] = false + } +} + +func (sc specCoverage) cover(sec string, sentence string) { + for _, s := range parseSentences(sentence) { + p := specPart{sec, s} + if _, ok := sc.coverage[p]; !ok { + panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s)) + } + sc.coverage[specPart{sec, s}] = true + } + +} + +var whitespaceRx = regexp.MustCompile(`\s+`) + +func parseSentences(sens string) []string { + sens = strings.TrimSpace(sens) + if sens == "" { + return nil + } + ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ") + for i, s := range ss { + s = strings.TrimSpace(s) + if !strings.HasSuffix(s, ".") { + s += "." + } + ss[i] = s + } + return ss +} + +func TestSpecParseSentences(t *testing.T) { + tests := []struct { + ss string + want []string + }{ + {"Sentence 1. Sentence 2.", + []string{ + "Sentence 1.", + "Sentence 2.", + }}, + {"Sentence 1. 
\nSentence 2.\tSentence 3.", + []string{ + "Sentence 1.", + "Sentence 2.", + "Sentence 3.", + }}, + } + + for i, tt := range tests { + got := parseSentences(tt.ss) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d: got = %q, want %q", i, got, tt.want) + } + } +} + +func TestSpecCoverage(t *testing.T) { + if !*coverSpec { + t.Skip() + } + + loadSpecOnce.Do(loadSpec) + + var ( + list []specPart + cv = defaultSpecCoverage.coverage + total = len(cv) + complete = 0 + ) + + for sp, touched := range defaultSpecCoverage.coverage { + if touched { + complete++ + } else { + list = append(list, sp) + } + } + sort.Stable(bySpecSection(list)) + + if testing.Short() && len(list) > 5 { + list = list[:5] + } + + for _, p := range list { + t.Errorf("\tSECTION %s: %s", p.section, p.sentence) + } + + t.Logf("%d/%d (%d%%) sentances covered", complete, total, (complete/total)*100) +} + +func attrSig(se xml.StartElement) string { + var names []string + for _, attr := range se.Attr { + if attr.Name.Local == "fmt" { + names = append(names, "fmt-"+attr.Value) + } else { + names = append(names, attr.Name.Local) + } + } + sort.Strings(names) + return strings.Join(names, ",") +} + +func attrValue(se xml.StartElement, attr string) string { + for _, a := range se.Attr { + if a.Name.Local == attr { + return a.Value + } + } + panic("unknown attribute " + attr) +} + +func TestSpecPartLess(t *testing.T) { + tests := []struct { + sec1, sec2 string + want bool + }{ + {"6.2.1", "6.2", false}, + {"6.2", "6.2.1", true}, + {"6.10", "6.10.1", true}, + {"6.10", "6.1.1", false}, // 10, not 1 + {"6.1", "6.1", false}, // equal, so not less + } + for _, tt := range tests { + got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"}) + if got != tt.want { + t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/dstunreach.go b/vendor/golang.org/x/net/icmp/dstunreach.go new file mode 100644 index 00000000000..75db991dfd3 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/dstunreach.go @@ -0,0 +1,41 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A DstUnreach represents an ICMP destination unreachable message +// body. +type DstUnreach struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *DstUnreach) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DstUnreach) Marshal(proto int) ([]byte, error) { + return marshalMultipartMessageBody(proto, p.Data, p.Extensions) +} + +// parseDstUnreach parses b as an ICMP destination unreachable message +// body. +func parseDstUnreach(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &DstUnreach{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/echo.go b/vendor/golang.org/x/net/icmp/echo.go new file mode 100644 index 00000000000..dd551811576 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/echo.go @@ -0,0 +1,45 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// An Echo represents an ICMP echo request or reply message body. +type Echo struct { + ID int // identifier + Seq int // sequence number + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *Echo) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *Echo) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq)) + copy(b[4:], p.Data) + return b, nil +} + +// parseEcho parses b as an ICMP echo request or reply message body. +func parseEcho(proto int, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/endpoint.go b/vendor/golang.org/x/net/icmp/endpoint.go new file mode 100644 index 00000000000..a68bfb010f5 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/endpoint.go @@ -0,0 +1,113 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + "runtime" + "syscall" + "time" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var _ net.PacketConn = &PacketConn{} + +// A PacketConn represents a packet network endpoint that uses either +// ICMPv4 or ICMPv6. +type PacketConn struct { + c net.PacketConn + p4 *ipv4.PacketConn + p6 *ipv6.PacketConn +} + +func (c *PacketConn) ok() bool { return c != nil && c.c != nil } + +// IPv4PacketConn returns the ipv4.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv4. +func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn { + if !c.ok() { + return nil + } + return c.p4 +} + +// IPv6PacketConn returns the ipv6.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv6. +func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn { + if !c.ok() { + return nil + } + return c.p6 +} + +// ReadFrom reads an ICMP message from the connection. +func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) { + if !c.ok() { + return 0, nil, syscall.EINVAL + } + // Please be informed that ipv4.NewPacketConn enables + // IP_STRIPHDR option by default on Darwin. + // See golang.org/issue/9395 for further information. + if runtime.GOOS == "darwin" && c.p4 != nil { + n, _, peer, err := c.p4.ReadFrom(b) + return n, peer, err + } + return c.c.ReadFrom(b) +} + +// WriteTo writes the ICMP message b to dst. +// Dst must be net.UDPAddr when c is a non-privileged +// datagram-oriented ICMP endpoint. Otherwise it must be net.IPAddr. +func (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.c.WriteTo(b, dst) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.Close() +} + +// LocalAddr returns the local network address. 
+func (c *PacketConn) LocalAddr() net.Addr { + if !c.ok() { + return nil + } + return c.c.LocalAddr() +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetWriteDeadline(t) +} diff --git a/vendor/golang.org/x/net/icmp/example_test.go b/vendor/golang.org/x/net/icmp/example_test.go new file mode 100644 index 00000000000..1df4ceccdd4 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/example_test.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "log" + "net" + "os" + "runtime" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExamplePacketConn_nonPrivilegedPing() { + switch runtime.GOOS { + case "darwin": + case "linux": + log.Println("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + log.Println("not supported on", runtime.GOOS) + return + } + + c, err := icmp.ListenPacket("udp6", "fe80::1%en0") + if err != nil { + log.Fatal(err) + } + defer c.Close() + + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1, + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("ff02::1"), Zone: "en0"}); err != nil { + log.Fatal(err) + } + + rb := make([]byte, 1500) + n, peer, err := c.ReadFrom(rb) + if err != nil { + log.Fatal(err) + } + rm, err := icmp.ParseMessage(58, rb[:n]) + if err != nil { + log.Fatal(err) + } + switch rm.Type { + case ipv6.ICMPTypeEchoReply: + log.Printf("got reflection from %v", peer) + default: + log.Printf("got %+v; want echo reply", rm) + } +} diff --git a/vendor/golang.org/x/net/icmp/extension.go b/vendor/golang.org/x/net/icmp/extension.go new file mode 100644 index 00000000000..402a7514b05 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// An Extension represents an ICMP extension. +type Extension interface { + // Len returns the length of ICMP extension. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP extension. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +const extensionVersion = 2 + +func validExtensionHeader(b []byte) bool { + v := int(b[0]&0xf0) >> 4 + s := binary.BigEndian.Uint16(b[2:4]) + if s != 0 { + s = checksum(b) + } + if v != extensionVersion || s != 0 { + return false + } + return true +} + +// parseExtensions parses b as a list of ICMP extensions. 
+// The length attribute l must be the length attribute field in +// received icmp messages. +// +// It will return a list of ICMP extensions and an adjusted length +// attribute that represents the length of the padded original +// datagram field. Otherwise, it returns an error. +func parseExtensions(b []byte, l int) ([]Extension, int, error) { + // Still a lot of non-RFC 4884 compliant implementations are + // out there. Set the length attribute l to 128 when it looks + // inappropriate for backwards compatibility. + // + // A minimal extension at least requires 8 octets; 4 octets + // for an extension header, and 4 octets for a single object + // header. + // + // See RFC 4884 for further information. + if 128 > l || l+8 > len(b) { + l = 128 + } + if l+8 > len(b) { + return nil, -1, errNoExtension + } + if !validExtensionHeader(b[l:]) { + if l == 128 { + return nil, -1, errNoExtension + } + l = 128 + if !validExtensionHeader(b[l:]) { + return nil, -1, errNoExtension + } + } + var exts []Extension + for b = b[l+4:]; len(b) >= 4; { + ol := int(binary.BigEndian.Uint16(b[:2])) + if 4 > ol || ol > len(b) { + break + } + switch b[2] { + case classMPLSLabelStack: + ext, err := parseMPLSLabelStack(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + case classInterfaceInfo: + ext, err := parseInterfaceInfo(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + } + b = b[ol:] + } + return exts, l, nil +} diff --git a/vendor/golang.org/x/net/icmp/extension_test.go b/vendor/golang.org/x/net/icmp/extension_test.go new file mode 100644 index 00000000000..0b3f7b9e156 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension_test.go @@ -0,0 +1,259 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/internal/iana" +) + +var marshalAndParseExtensionTests = []struct { + proto int + hdr []byte + obj []byte + exts []Extension +}{ + // MPLS label stack with no label + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x01, 0x01, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + }, + }, + }, + // MPLS label stack with a single label + { + proto: iana.ProtocolIPv6ICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x08, 0x01, 0x01, + 0x03, 0xe8, 0xe9, 0xff, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + // MPLS label stack with multiple labels + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x0c, 0x01, 0x01, + 0x03, 0xe8, 0xde, 0xfe, + 0x03, 0xe8, 0xe1, 0xff, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16013, + TC: 0x7, + S: false, + TTL: 254, + }, + { + Label: 16014, + TC: 0, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + // Interface information with no attribute + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x02, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + }, + }, + }, + // Interface information with ifIndex and name + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x10, 0x02, 0x0a, + 0x00, 0x00, 0x00, 0x10, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0a, + Interface: &net.Interface{ + Index: 16, + Name: "en101", + }, + }, + }, + }, + // Interface information with ifIndex, IPAddr, name and MTU + { + proto: iana.ProtocolIPv6ICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x28, 0x02, 0x0f, + 0x00, 0x00, 0x00, 0x0f, + 0x00, 0x02, 0x00, 0x00, + 0xfe, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + }, +} + +func TestMarshalAndParseExtension(t *testing.T) { + for i, tt := range marshalAndParseExtensionTests { + for j, ext := range tt.exts { + var err error + var b []byte + switch ext := ext.(type) { + case *MPLSLabelStack: + b, err = ext.Marshal(tt.proto) + if err != nil { + t.Errorf("#%v/%v: %v", i, j, err) + continue + } + case *InterfaceInfo: + b, err = ext.Marshal(tt.proto) + if err != nil { + t.Errorf("#%v/%v: %v", i, j, err) + continue + } + } + if !reflect.DeepEqual(b, tt.obj) { + t.Errorf("#%v/%v: got %#v; want %#v", i, j, b, tt.obj) + continue + } + } + + for j, wire := range []struct { + data []byte // original datagram + inlattr int // length of padded original datagram, a hint + outlattr int // length of padded original datagram, a want + err error + }{ + {nil, 0, 
-1, errNoExtension}, + {make([]byte, 127), 128, -1, errNoExtension}, + + {make([]byte, 128), 127, -1, errNoExtension}, + {make([]byte, 128), 128, -1, errNoExtension}, + {make([]byte, 128), 129, -1, errNoExtension}, + + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 127, 128, nil}, + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 128, 128, nil}, + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 129, 128, nil}, + + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 511, -1, errNoExtension}, + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 512, 512, nil}, + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 513, -1, errNoExtension}, + } { + exts, l, err := parseExtensions(wire.data, wire.inlattr) + if err != wire.err { + t.Errorf("#%v/%v: got %v; want %v", i, j, err, wire.err) + continue + } + if wire.err != nil { + continue + } + if l != wire.outlattr { + t.Errorf("#%v/%v: got %v; want %v", i, j, l, wire.outlattr) + } + if !reflect.DeepEqual(exts, tt.exts) { + for j, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + want := tt.exts[j].(*MPLSLabelStack) + t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) + case *InterfaceInfo: + want := tt.exts[j].(*InterfaceInfo) + t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) + } + } + continue + } + } + } +} + +var parseInterfaceNameTests = []struct { + b []byte + error +}{ + {[]byte{0, 'e', 'n', '0'}, errInvalidExtension}, + {[]byte{4, 'e', 'n', '0'}, nil}, + {[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension}, + {[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort}, +} + +func TestParseInterfaceName(t *testing.T) { + ifi := InterfaceInfo{Interface: &net.Interface{}} + for i, tt := range parseInterfaceNameTests { + if _, err := ifi.parseName(tt.b); err != tt.error { + t.Errorf("#%d: got %v; want %v", i, err, tt.error) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/helper.go b/vendor/golang.org/x/net/icmp/helper.go new file mode 100644 index 00000000000..6c4e633bc95 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/helper.go @@ -0,0 +1,27 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. + freebsdVersion uint32 + + nativeEndian binary.ByteOrder +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = binary.LittleEndian + } else { + nativeEndian = binary.BigEndian + } +} diff --git a/vendor/golang.org/x/net/icmp/helper_posix.go b/vendor/golang.org/x/net/icmp/helper_posix.go new file mode 100644 index 00000000000..398fd388ffd --- /dev/null +++ b/vendor/golang.org/x/net/icmp/helper_posix.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "strconv" + "syscall" +) + +func sockaddr(family int, address string) (syscall.Sockaddr, error) { + switch family { + case syscall.AF_INET: + a, err := net.ResolveIPAddr("ip4", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv4zero + } + if a.IP = a.IP.To4(); a.IP == nil { + return nil, net.InvalidAddrError("non-ipv4 address") + } + sa := &syscall.SockaddrInet4{} + copy(sa.Addr[:], a.IP) + return sa, nil + case syscall.AF_INET6: + a, err := net.ResolveIPAddr("ip6", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv6unspecified + } + if a.IP.Equal(net.IPv4zero) { + a.IP = net.IPv6unspecified + } + if a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil { + return nil, net.InvalidAddrError("non-ipv6 address") + } + sa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)} + copy(sa.Addr[:], a.IP) + return sa, nil + default: + return nil, net.InvalidAddrError("unexpected family") + } +} + +func zoneToUint32(zone string) uint32 { + if zone == "" { + return 0 + } + if ifi, err := net.InterfaceByName(zone); err == nil { + return uint32(ifi.Index) + } + n, err := strconv.Atoi(zone) + if err != nil { + return 0 + } + return uint32(n) +} + +func last(s string, b byte) int { + i := len(s) + for i--; i >= 0; i-- { + if s[i] == b { + break + } + } + return i +} diff --git a/vendor/golang.org/x/net/icmp/interface.go b/vendor/golang.org/x/net/icmp/interface.go new file mode 100644 index 00000000000..78b5b98bf9c --- /dev/null +++ b/vendor/golang.org/x/net/icmp/interface.go @@ -0,0 +1,236 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "strings" + + "golang.org/x/net/internal/iana" +) + +const ( + classInterfaceInfo = 2 + + afiIPv4 = 1 + afiIPv6 = 2 +) + +const ( + attrMTU = 1 << iota + attrName + attrIPAddr + attrIfIndex +) + +// An InterfaceInfo represents interface and next-hop identification. +type InterfaceInfo struct { + Class int // extension object class number + Type int // extension object sub-type + Interface *net.Interface + Addr *net.IPAddr +} + +func (ifi *InterfaceInfo) nameLen() int { + if len(ifi.Interface.Name) > 63 { + return 64 + } + l := 1 + len(ifi.Interface.Name) + return (l + 3) &^ 3 +} + +func (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) { + l = 4 + if ifi.Interface != nil && ifi.Interface.Index > 0 { + attrs |= attrIfIndex + l += 4 + if len(ifi.Interface.Name) > 0 { + attrs |= attrName + l += ifi.nameLen() + } + if ifi.Interface.MTU > 0 { + attrs |= attrMTU + l += 4 + } + } + if ifi.Addr != nil { + switch proto { + case iana.ProtocolICMP: + if ifi.Addr.IP.To4() != nil { + attrs |= attrIPAddr + l += 4 + net.IPv4len + } + case iana.ProtocolIPv6ICMP: + if ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + attrs |= attrIPAddr + l += 4 + net.IPv6len + } + } + } + return +} + +// Len implements the Len method of Extension interface. +func (ifi *InterfaceInfo) Len(proto int) int { + _, l := ifi.attrsAndLen(proto) + return l +} + +// Marshal implements the Marshal method of Extension interface. 
+func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) { + attrs, l := ifi.attrsAndLen(proto) + b := make([]byte, l) + if err := ifi.marshal(proto, b, attrs, l); err != nil { + return nil, err + } + return b, nil +} + +func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error { + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classInterfaceInfo, byte(ifi.Type) + for b = b[4:]; len(b) > 0 && attrs != 0; { + switch { + case attrs&attrIfIndex != 0: + b = ifi.marshalIfIndex(proto, b) + attrs &^= attrIfIndex + case attrs&attrIPAddr != 0: + b = ifi.marshalIPAddr(proto, b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b = ifi.marshalName(proto, b) + attrs &^= attrName + case attrs&attrMTU != 0: + b = ifi.marshalMTU(proto, b) + attrs &^= attrMTU + } + } + return nil +} + +func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte { + switch proto { + case iana.ProtocolICMP: + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv4)) + copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4()) + b = b[4+net.IPv4len:] + case iana.ProtocolIPv6ICMP: + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv6)) + copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16()) + b = b[4+net.IPv6len:] + } + return b +} + +func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + afi := int(binary.BigEndian.Uint16(b[:2])) + b = b[4:] + switch afi { + case afiIPv4: + if len(b) < net.IPv4len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv4len) + copy(ifi.Addr.IP, b[:net.IPv4len]) + b = b[net.IPv4len:] + case afiIPv6: + if len(b) < net.IPv6len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv6len) + copy(ifi.Addr.IP, b[:net.IPv6len]) + b = b[net.IPv6len:] + } + return b, nil +} + +func (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte { + l := byte(ifi.nameLen()) + b[0] = l + copy(b[1:], []byte(ifi.Interface.Name)) + return b[l:] +} + +func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) { + if 4 > len(b) || len(b) < int(b[0]) { + return nil, errMessageTooShort + } + l := int(b[0]) + if l%4 != 0 || 4 > l || l > 64 { + return nil, errInvalidExtension + } + var name [63]byte + copy(name[:], b[1:l]) + ifi.Interface.Name = strings.Trim(string(name[:]), "\000") + return b[l:], nil +} + +func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func parseInterfaceInfo(b []byte) (Extension, error) { + ifi := &InterfaceInfo{ + Class: int(b[2]), + Type: int(b[3]), + } + if ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 { + ifi.Interface = &net.Interface{} + } + if ifi.Type&attrIPAddr != 0 { + ifi.Addr = &net.IPAddr{} + } + attrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU) + for b = b[4:]; len(b) > 0 && attrs != 0; { + var err error + switch { + case attrs&attrIfIndex != 0: + b, err = ifi.parseIfIndex(b) + attrs &^= 
attrIfIndex + case attrs&attrIPAddr != 0: + b, err = ifi.parseIPAddr(b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b, err = ifi.parseName(b) + attrs &^= attrName + case attrs&attrMTU != 0: + b, err = ifi.parseMTU(b) + attrs &^= attrMTU + } + if err != nil { + return nil, err + } + } + if ifi.Interface != nil && ifi.Interface.Name != "" && ifi.Addr != nil && ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + ifi.Addr.Zone = ifi.Interface.Name + } + return ifi, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4.go b/vendor/golang.org/x/net/icmp/ipv4.go new file mode 100644 index 00000000000..729ddc97c00 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "runtime" + + "golang.org/x/net/ipv4" +) + +// ParseIPv4Header parses b as an IPv4 header of ICMP error message +// invoking packet, which is contained in ICMP error message. +func ParseIPv4Header(b []byte) (*ipv4.Header, error) { + if len(b) < ipv4.HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return nil, errBufferTooShort + } + h := &ipv4.Header{ + Version: int(b[0] >> 4), + Len: hdrlen, + TOS: int(b[1]), + ID: int(binary.BigEndian.Uint16(b[4:6])), + FragOff: int(binary.BigEndian.Uint16(b[6:8])), + TTL: int(b[8]), + Protocol: int(b[9]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), + Src: net.IPv4(b[12], b[13], b[14], b[15]), + Dst: net.IPv4(b[16], b[17], b[18], b[19]), + } + switch runtime.GOOS { + case "darwin": + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + case "freebsd": + if freebsdVersion >= 1000000 { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } else { + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } + h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + if hdrlen-ipv4.HeaderLen > 0 { + h.Options = make([]byte, hdrlen-ipv4.HeaderLen) + copy(h.Options, b[ipv4.HeaderLen:]) + } + return h, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4_test.go b/vendor/golang.org/x/net/icmp/ipv4_test.go new file mode 100644 index 00000000000..47cc00d0765 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4_test.go @@ -0,0 +1,82 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/ipv4" +) + +type ipv4HeaderTest struct { + wireHeaderFromKernel [ipv4.HeaderLen]byte + wireHeaderFromTradBSDKernel [ipv4.HeaderLen]byte + Header *ipv4.Header +} + +var ipv4HeaderLittleEndianTest = ipv4HeaderTest{ + // TODO(mikio): Add platform dependent wire header formats when + // we support new platforms. 
+ wireHeaderFromKernel: [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromTradBSDKernel: [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + Header: &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: ipv4.DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + }, +} + +func TestParseIPv4Header(t *testing.T) { + tt := &ipv4HeaderLittleEndianTest + if nativeEndian != binary.LittleEndian { + t.Skip("no test for non-little endian machine yet") + } + + var wh []byte + switch runtime.GOOS { + case "darwin": + wh = tt.wireHeaderFromTradBSDKernel[:] + case "freebsd": + if freebsdVersion >= 1000000 { + wh = tt.wireHeaderFromKernel[:] + } else { + wh = tt.wireHeaderFromTradBSDKernel[:] + } + default: + wh = tt.wireHeaderFromKernel[:] + } + h, err := ParseIPv4Header(wh) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, tt.Header) { + t.Fatalf("got %#v; want %#v", h, tt.Header) + } +} diff --git a/vendor/golang.org/x/net/icmp/ipv6.go b/vendor/golang.org/x/net/icmp/ipv6.go new file mode 100644 index 00000000000..58eaa77d025 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv6.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + + "golang.org/x/net/internal/iana" +) + +const ipv6PseudoHeaderLen = 2*net.IPv6len + 8 + +// IPv6PseudoHeader returns an IPv6 pseudo header for checksum +// calculation. +func IPv6PseudoHeader(src, dst net.IP) []byte { + b := make([]byte, ipv6PseudoHeaderLen) + copy(b, src.To16()) + copy(b[net.IPv6len:], dst.To16()) + b[len(b)-1] = byte(iana.ProtocolIPv6ICMP) + return b +} diff --git a/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/golang.org/x/net/icmp/listen_posix.go new file mode 100644 index 00000000000..b9f260796e9 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_posix.go @@ -0,0 +1,98 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "os" + "runtime" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. 
+// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + var family, proto int + switch network { + case "udp4": + family, proto = syscall.AF_INET, iana.ProtocolICMP + case "udp6": + family, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP + default: + i := last(network, ':') + switch network[:i] { + case "ip4": + proto = iana.ProtocolICMP + case "ip6": + proto = iana.ProtocolIPv6ICMP + } + } + var cerr error + var c net.PacketConn + switch family { + case syscall.AF_INET, syscall.AF_INET6: + s, err := syscall.Socket(family, syscall.SOCK_DGRAM, proto) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + defer syscall.Close(s) + if runtime.GOOS == "darwin" && family == syscall.AF_INET { + if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil { + return nil, os.NewSyscallError("setsockopt", err) + } + } + sa, err := sockaddr(family, address) + if err != nil { + return nil, err + } + if err := syscall.Bind(s, sa); err != nil { + return nil, os.NewSyscallError("bind", err) + } + f := os.NewFile(uintptr(s), "datagram-oriented icmp") + defer f.Close() + c, cerr = net.FilePacketConn(f) + default: + c, cerr = net.ListenPacket(network, address) + } + if cerr != nil { + return nil, cerr + } + switch proto { + case iana.ProtocolICMP: + return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil + case iana.ProtocolIPv6ICMP: + return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil + default: + return &PacketConn{c: c}, nil + } +} diff --git a/vendor/golang.org/x/net/icmp/listen_stub.go b/vendor/golang.org/x/net/icmp/listen_stub.go new file mode 100644 index 00000000000..668728d17e2 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_stub.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package icmp + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. +// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + return nil, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/icmp/message.go b/vendor/golang.org/x/net/icmp/message.go new file mode 100644 index 00000000000..42d6df2c13d --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message.go @@ -0,0 +1,150 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package icmp provides basic functions for the manipulation of +// messages used in the Internet Control Message Protocols, +// ICMPv4 and ICMPv6. +// +// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443. +// Multi-part message support for ICMP is defined in RFC 4884. +// ICMP extensions for MPLS are defined in RFC 4950. +// ICMP extensions for interface and next-hop identification are +// defined in RFC 5837. +package icmp // import "golang.org/x/net/icmp" + +import ( + "encoding/binary" + "errors" + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var ( + errMessageTooShort = errors.New("message too short") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errOpNoSupport = errors.New("operation not supported") + errNoExtension = errors.New("no extension") + errInvalidExtension = errors.New("invalid extension") +) + +func checksum(b []byte) uint16 { + csumcv := len(b) - 1 // checksum coverage + s := uint32(0) + for i := 0; i < csumcv; i += 2 { + s += uint32(b[i+1])<<8 | uint32(b[i]) + } + if csumcv&1 == 0 { + s += uint32(b[csumcv]) + } + s = s>>16 + s&0xffff + s = s + s>>16 + return ^uint16(s) +} + +// A Type represents an ICMP message type. +type Type interface { + Protocol() int +} + +// A Message represents an ICMP message. +type Message struct { + Type Type // type, either ipv4.ICMPType or ipv6.ICMPType + Code int // code + Checksum int // checksum + Body MessageBody // body +} + +// Marshal returns the binary encoding of the ICMP message m. +// +// For an ICMPv4 message, the returned message always contains the +// calculated checksum field. +// +// For an ICMPv6 message, the returned message contains the calculated +// checksum field when psh is not nil, otherwise the kernel will +// compute the checksum field during the message transmission. +// When psh is not nil, it must be the pseudo header for IPv6. +func (m *Message) Marshal(psh []byte) ([]byte, error) { + var mtype int + switch typ := m.Type.(type) { + case ipv4.ICMPType: + mtype = int(typ) + case ipv6.ICMPType: + mtype = int(typ) + default: + return nil, syscall.EINVAL + } + b := []byte{byte(mtype), byte(m.Code), 0, 0} + if m.Type.Protocol() == iana.ProtocolIPv6ICMP && psh != nil { + b = append(psh, b...) + } + if m.Body != nil && m.Body.Len(m.Type.Protocol()) != 0 { + mb, err := m.Body.Marshal(m.Type.Protocol()) + if err != nil { + return nil, err + } + b = append(b, mb...) + } + if m.Type.Protocol() == iana.ProtocolIPv6ICMP { + if psh == nil { // cannot calculate checksum here + return b, nil + } + off, l := 2*net.IPv6len, len(b)-len(psh) + binary.BigEndian.PutUint32(b[off:off+4], uint32(l)) + } + s := checksum(b) + // Place checksum back in header; using ^= avoids the + // assumption the checksum bytes are zero. 
+ b[len(psh)+2] ^= byte(s) + b[len(psh)+3] ^= byte(s >> 8) + return b[len(psh):], nil +} + +var parseFns = map[Type]func(int, []byte) (MessageBody, error){ + ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv4.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv4.ICMPTypeParameterProblem: parseParamProb, + + ipv4.ICMPTypeEcho: parseEcho, + ipv4.ICMPTypeEchoReply: parseEcho, + + ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv6.ICMPTypePacketTooBig: parsePacketTooBig, + ipv6.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv6.ICMPTypeParameterProblem: parseParamProb, + + ipv6.ICMPTypeEchoRequest: parseEcho, + ipv6.ICMPTypeEchoReply: parseEcho, +} + +// ParseMessage parses b as an ICMP message. +// Proto must be either the ICMPv4 or ICMPv6 protocol number. +func ParseMessage(proto int, b []byte) (*Message, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + var err error + m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))} + switch proto { + case iana.ProtocolICMP: + m.Type = ipv4.ICMPType(b[0]) + case iana.ProtocolIPv6ICMP: + m.Type = ipv6.ICMPType(b[0]) + default: + return nil, syscall.EINVAL + } + if fn, ok := parseFns[m.Type]; !ok { + m.Body, err = parseDefaultMessageBody(proto, b[4:]) + } else { + m.Body, err = fn(proto, b[4:]) + } + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/icmp/message_test.go b/vendor/golang.org/x/net/icmp/message_test.go new file mode 100644 index 00000000000..5d2605f8d1e --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message_test.go @@ -0,0 +1,134 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var marshalAndParseMessageForIPv4Tests = []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv4.ICMPTypePhoturis, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, +} + +func TestMarshalAndParseMessageForIPv4(t *testing.T) { + for i, tt := range marshalAndParseMessageForIPv4Tests { + b, err := tt.Marshal(nil) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + if !reflect.DeepEqual(m.Body, tt.Body) { + t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) + } + } +} + +var marshalAndParseMessageForIPv6Tests = []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypePacketTooBig, Code: 0, + Body: &icmp.PacketTooBig{ + MTU: 1<<16 - 1, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + 
Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv6.ICMPTypeDuplicateAddressConfirmation, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, +} + +func TestMarshalAndParseMessageForIPv6(t *testing.T) { + pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) + for i, tt := range marshalAndParseMessageForIPv6Tests { + for _, psh := range [][]byte{pshicmp, nil} { + b, err := tt.Marshal(psh) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + if !reflect.DeepEqual(m.Body, tt.Body) { + t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) + } + } + } +} diff --git a/vendor/golang.org/x/net/icmp/messagebody.go b/vendor/golang.org/x/net/icmp/messagebody.go new file mode 100644 index 00000000000..2121a17be26 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/messagebody.go @@ -0,0 +1,41 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A MessageBody represents an ICMP message body. +type MessageBody interface { + // Len returns the length of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +// A DefaultMessageBody represents the default message body. +type DefaultMessageBody struct { + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *DefaultMessageBody) Len(proto int) int { + if p == nil { + return 0 + } + return len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DefaultMessageBody) Marshal(proto int) ([]byte, error) { + return p.Data, nil +} + +// parseDefaultMessageBody parses b as an ICMP message body. +func parseDefaultMessageBody(proto int, b []byte) (MessageBody, error) { + p := &DefaultMessageBody{Data: make([]byte, len(b))} + copy(p.Data, b) + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/mpls.go b/vendor/golang.org/x/net/icmp/mpls.go new file mode 100644 index 00000000000..c314917488e --- /dev/null +++ b/vendor/golang.org/x/net/icmp/mpls.go @@ -0,0 +1,77 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A MPLSLabel represents a MPLS label stack entry. +type MPLSLabel struct { + Label int // label value + TC int // traffic class; formerly experimental use + S bool // bottom of stack + TTL int // time to live +} + +const ( + classMPLSLabelStack = 1 + typeIncomingMPLSLabelStack = 1 +) + +// A MPLSLabelStack represents a MPLS label stack. +type MPLSLabelStack struct { + Class int // extension object class number + Type int // extension object sub-type + Labels []MPLSLabel +} + +// Len implements the Len method of Extension interface. 
+func (ls *MPLSLabelStack) Len(proto int) int { + return 4 + (4 * len(ls.Labels)) +} + +// Marshal implements the Marshal method of Extension interface. +func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) { + b := make([]byte, ls.Len(proto)) + if err := ls.marshal(proto, b); err != nil { + return nil, err + } + return b, nil +} + +func (ls *MPLSLabelStack) marshal(proto int, b []byte) error { + l := ls.Len(proto) + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack + off := 4 + for _, ll := range ls.Labels { + b[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0) + b[off+2] |= byte(ll.TC << 1 & 0x0e) + if ll.S { + b[off+2] |= 0x1 + } + b[off+3] = byte(ll.TTL) + off += 4 + } + return nil +} + +func parseMPLSLabelStack(b []byte) (Extension, error) { + ls := &MPLSLabelStack{ + Class: int(b[2]), + Type: int(b[3]), + } + for b = b[4:]; len(b) >= 4; b = b[4:] { + ll := MPLSLabel{ + Label: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4, + TC: int(b[2]&0x0e) >> 1, + TTL: int(b[3]), + } + if b[2]&0x1 != 0 { + ll.S = true + } + ls.Labels = append(ls.Labels, ll) + } + return ls, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart.go b/vendor/golang.org/x/net/icmp/multipart.go new file mode 100644 index 00000000000..f2713566060 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart.go @@ -0,0 +1,109 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "golang.org/x/net/internal/iana" + +// multipartMessageBodyDataLen takes b as an original datagram and +// exts as extensions, and returns a required length for message body +// and a required length for a padded original datagram in wire +// format. +func multipartMessageBodyDataLen(proto int, b []byte, exts []Extension) (bodyLen, dataLen int) { + for _, ext := range exts { + bodyLen += ext.Len(proto) + } + if bodyLen > 0 { + dataLen = multipartMessageOrigDatagramLen(proto, b) + bodyLen += 4 // length of extension header + } else { + dataLen = len(b) + } + bodyLen += dataLen + return bodyLen, dataLen +} + +// multipartMessageOrigDatagramLen takes b as an original datagram, +// and returns a required length for a padded orignal datagram in wire +// format. +func multipartMessageOrigDatagramLen(proto int, b []byte) int { + roundup := func(b []byte, align int) int { + // According to RFC 4884, the padded original datagram + // field must contain at least 128 octets. + if len(b) < 128 { + return 128 + } + r := len(b) + return (r + align - 1) & ^(align - 1) + } + switch proto { + case iana.ProtocolICMP: + return roundup(b, 4) + case iana.ProtocolIPv6ICMP: + return roundup(b, 8) + default: + return len(b) + } +} + +// marshalMultipartMessageBody takes data as an original datagram and +// exts as extesnsions, and returns a binary encoding of message body. +// It can be used for non-multipart message bodies when exts is nil. 
+func marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]byte, error) { + bodyLen, dataLen := multipartMessageBodyDataLen(proto, data, exts) + b := make([]byte, 4+bodyLen) + copy(b[4:], data) + off := dataLen + 4 + if len(exts) > 0 { + b[dataLen+4] = byte(extensionVersion << 4) + off += 4 // length of object header + for _, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + if err := ext.marshal(proto, b[off:]); err != nil { + return nil, err + } + off += ext.Len(proto) + case *InterfaceInfo: + attrs, l := ext.attrsAndLen(proto) + if err := ext.marshal(proto, b[off:], attrs, l); err != nil { + return nil, err + } + off += ext.Len(proto) + } + } + s := checksum(b[dataLen+4:]) + b[dataLen+4+2] ^= byte(s) + b[dataLen+4+3] ^= byte(s >> 8) + switch proto { + case iana.ProtocolICMP: + b[1] = byte(dataLen / 4) + case iana.ProtocolIPv6ICMP: + b[0] = byte(dataLen / 8) + } + } + return b, nil +} + +// parseMultipartMessageBody parses b as either a non-multipart +// message body or a multipart message body. +func parseMultipartMessageBody(proto int, b []byte) ([]byte, []Extension, error) { + var l int + switch proto { + case iana.ProtocolICMP: + l = 4 * int(b[1]) + case iana.ProtocolIPv6ICMP: + l = 8 * int(b[0]) + } + if len(b) == 4 { + return nil, nil, nil + } + exts, l, err := parseExtensions(b[4:], l) + if err != nil { + l = len(b) - 4 + } + data := make([]byte, l) + copy(data, b[4:]) + return data, exts, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart_test.go b/vendor/golang.org/x/net/icmp/multipart_test.go new file mode 100644 index 00000000000..966ccb8da11 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart_test.go @@ -0,0 +1,442 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
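Before the multipart tests that follow, a hedged sketch of how the RFC 4884 length accounting in marshalMultipartMessageBody plays out; the figures in the comments are derived from the code above (a 21-byte invoking packet padded to 128 octets plus a single-label MPLS stack), not from anything in the fabric code base.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

func main() {
	m := icmp.Message{
		Type: ipv4.ICMPTypeTimeExceeded, Code: 0,
		Body: &icmp.TimeExceeded{
			// 21 bytes of invoking packet, padded to 128 octets for ICMPv4.
			Data: []byte("ERROR-INVOKING-PACKET"),
			Extensions: []icmp.Extension{
				&icmp.MPLSLabelStack{
					Class: 1, Type: 1,
					Labels: []icmp.MPLSLabel{{Label: 16014, TC: 0x4, S: true, TTL: 255}},
				},
			},
		},
	}
	b, err := m.Marshal(nil)
	if err != nil {
		log.Fatal(err)
	}
	// 4 (ICMP header) + 4 (unused/length) + 128 (padded datagram) +
	// 4 (extension header) + 4 (object header) + 4 (one MPLS label) = 148.
	// The length octet at offset 5 counts the padded datagram in 32-bit
	// words: 128/4 = 32, which is what the multipart test asserts via b[5].
	fmt.Println(len(b), b[5])
}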
+ +package icmp_test + +import ( + "fmt" + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var marshalAndParseMultipartMessageForIPv4Tests = []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 2).To4(), + }, + }, + }, + }, + }, +} + +func TestMarshalAndParseMultipartMessageForIPv4(t *testing.T) { + for i, tt := range marshalAndParseMultipartMessageForIPv4Tests { + b, err := tt.Marshal(nil) + if err != nil { + t.Fatal(err) + } + if b[5] != 32 { + t.Errorf("#%v: got %v; want 32", i, b[5]) + } + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + switch m.Type { + case ipv4.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv4.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv4.ICMPTypeParameterProblem: + got, want := m.Body.(*icmp.ParamProb), tt.Body.(*icmp.ParamProb) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + } + } +} + +var marshalAndParseMultipartMessageForIPv6Tests = []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, 
+ Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en102", + }, + }, + }, + }, + }, +} + +func TestMarshalAndParseMultipartMessageForIPv6(t *testing.T) { + pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) + for i, tt := range marshalAndParseMultipartMessageForIPv6Tests { + for _, psh := range [][]byte{pshicmp, nil} { + b, err := tt.Marshal(psh) + if err != nil { + t.Fatal(err) + } + if b[4] != 16 { + t.Errorf("#%v: got %v; want 16", i, b[4]) + } + m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + switch m.Type { + case ipv6.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv6.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + } + } + } +} + +func dumpExtensions(i int, gotExts, wantExts []icmp.Extension) string { + var s string + for j, got := range gotExts { + switch got := got.(type) { + case *icmp.MPLSLabelStack: + want := wantExts[j].(*icmp.MPLSLabelStack) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%v/%v: got %#v; want %#v\n", i, j, got, want) + } + case *icmp.InterfaceInfo: + want := wantExts[j].(*icmp.InterfaceInfo) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%v/%v: got %#v, %#v, %#v; want %#v, %#v, %#v\n", i, j, got, got.Interface, got.Addr, want, want.Interface, want.Addr) + } + } + } + return s[:len(s)-1] +} + +var multipartMessageBodyLenTests = []struct { + proto int + in icmp.MessageBody + out int +}{ + { + iana.ProtocolICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + 
ipv4.HeaderLen, // [pointer, unused] and original datagram + }, + + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload, original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 132, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.PacketTooBig{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // mtu and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // pointer and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 127), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 136, // [length, unused], extension header, object header, object payload and original datagram + }, +} + +func TestMultipartMessageBodyLen(t *testing.T) { + for i, tt := range multipartMessageBodyLenTests { + if out := tt.in.Len(tt.proto); out != tt.out { + t.Errorf("#%d: got %d; want %d", i, out, tt.out) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/packettoobig.go b/vendor/golang.org/x/net/icmp/packettoobig.go new file mode 100644 index 00000000000..a1c9df7bff1 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/packettoobig.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A PacketTooBig represents an ICMP packet too big message body. +type PacketTooBig struct { + MTU int // maximum transmission unit of the nexthop link + Data []byte // data, known as original datagram field +} + +// Len implements the Len method of MessageBody interface. +func (p *PacketTooBig) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. 
+func (p *PacketTooBig) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint32(b[:4], uint32(p.MTU)) + copy(b[4:], p.Data) + return b, nil +} + +// parsePacketTooBig parses b as an ICMP packet too big message body. +func parsePacketTooBig(proto int, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/paramprob.go b/vendor/golang.org/x/net/icmp/paramprob.go new file mode 100644 index 00000000000..0a2548daaee --- /dev/null +++ b/vendor/golang.org/x/net/icmp/paramprob.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "golang.org/x/net/internal/iana" +) + +// A ParamProb represents an ICMP parameter problem message body. +type ParamProb struct { + Pointer uintptr // offset within the data where the error was detected + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *ParamProb) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *ParamProb) Marshal(proto int) ([]byte, error) { + if proto == iana.ProtocolIPv6ICMP { + b := make([]byte, p.Len(proto)) + binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer)) + copy(b[4:], p.Data) + return b, nil + } + b, err := marshalMultipartMessageBody(proto, p.Data, p.Extensions) + if err != nil { + return nil, err + } + b[0] = byte(p.Pointer) + return b, nil +} + +// parseParamProb parses b as an ICMP parameter problem message body. +func parseParamProb(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &ParamProb{} + if proto == iana.ProtocolIPv6ICMP { + p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4])) + p.Data = make([]byte, len(b)-4) + copy(p.Data, b[4:]) + return p, nil + } + p.Pointer = uintptr(b[0]) + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/ping_test.go b/vendor/golang.org/x/net/icmp/ping_test.go new file mode 100644 index 00000000000..4ec269284f8 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ping_test.go @@ -0,0 +1,166 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
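ping_test.go, added next, exercises the full send/receive path through these types. As a rough sketch of the same flow outside the test harness (the target address, timeout, and the Linux ping_group_range caveat are illustrative assumptions, not project configuration):

package main

import (
	"log"
	"net"
	"os"
	"time"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

func main() {
	// Unprivileged datagram-oriented endpoint; on Linux the
	// net.ipv4.ping_group_range sysctl must include our group.
	c, err := icmp.ListenPacket("udp4", "0.0.0.0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	wm := icmp.Message{
		Type: ipv4.ICMPTypeEcho, Code: 0,
		Body: &icmp.Echo{
			ID: os.Getpid() & 0xffff, Seq: 1,
			Data: []byte("HELLO-R-U-THERE"),
		},
	}
	wb, err := wm.Marshal(nil)
	if err != nil {
		log.Fatal(err)
	}
	// 8.8.8.8 is an arbitrary, illustrative target.
	if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("8.8.8.8")}); err != nil {
		log.Fatal(err)
	}

	rb := make([]byte, 1500)
	if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil {
		log.Fatal(err)
	}
	n, peer, err := c.ReadFrom(rb)
	if err != nil {
		log.Fatal(err)
	}
	// 1 is iana.ProtocolICMP; internal/iana cannot be imported from outside
	// golang.org/x/net, so the literal protocol number is used here.
	rm, err := icmp.ParseMessage(1, rb[:n])
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %v from %v", rm.Type, peer)
}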
+ +package icmp_test + +import ( + "errors" + "fmt" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) { + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + return nil, err + } + netaddr := func(ip net.IP) (net.Addr, error) { + switch c.LocalAddr().(type) { + case *net.UDPAddr: + return &net.UDPAddr{IP: ip}, nil + case *net.IPAddr: + return &net.IPAddr{IP: ip}, nil + default: + return nil, errors.New("neither UDPAddr nor IPAddr") + } + } + for _, ip := range ips { + switch protocol { + case iana.ProtocolICMP: + if ip.To4() != nil { + return netaddr(ip) + } + case iana.ProtocolIPv6ICMP: + if ip.To16() != nil && ip.To4() == nil { + return netaddr(ip) + } + } + } + return nil, errors.New("no A or AAAA record") +} + +type pingTest struct { + network, address string + protocol int + mtype icmp.Type +} + +var nonPrivilegedPingTests = []pingTest{ + {"udp4", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, + + {"udp6", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, +} + +func TestNonPrivilegedPing(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + for i, tt := range nonPrivilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +var privilegedPingTests = []pingTest{ + {"ip4:icmp", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, + + {"ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, +} + +func TestPrivilegedPing(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + for i, tt := range privilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +func doPing(tt pingTest, seq int) error { + c, err := icmp.ListenPacket(tt.network, tt.address) + if err != nil { + return err + } + defer c.Close() + + dst, err := googleAddr(c, tt.protocol) + if err != nil { + return err + } + + if tt.network != "udp6" && tt.protocol == iana.ProtocolIPv6ICMP { + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeDestinationUnreachable) + f.Accept(ipv6.ICMPTypePacketTooBig) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeParameterProblem) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil { + return err + } + } + + wm := icmp.Message{ + Type: tt.mtype, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1 << uint(seq), + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + return err + } + if n, err := c.WriteTo(wb, dst); err != nil { + return err + } else if n != len(wb) { + return fmt.Errorf("got %v; want %v", n, len(wb)) + } + + rb := make([]byte, 1500) + if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + return err + } + n, peer, err := c.ReadFrom(rb) + if err != nil { + return err + } + rm, err := icmp.ParseMessage(tt.protocol, rb[:n]) + if err != nil { + return err + } + switch rm.Type { + case ipv4.ICMPTypeEchoReply, ipv6.ICMPTypeEchoReply: + return nil + default: + return fmt.Errorf("got %+v from %v; want echo 
reply", rm, peer) + } +} diff --git a/vendor/golang.org/x/net/icmp/sys_freebsd.go b/vendor/golang.org/x/net/icmp/sys_freebsd.go new file mode 100644 index 00000000000..c75f3ddaa7b --- /dev/null +++ b/vendor/golang.org/x/net/icmp/sys_freebsd.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "syscall" + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") +} diff --git a/vendor/golang.org/x/net/icmp/timeexceeded.go b/vendor/golang.org/x/net/icmp/timeexceeded.go new file mode 100644 index 00000000000..344e15848d5 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/timeexceeded.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A TimeExceeded represents an ICMP time exceeded message body. +type TimeExceeded struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *TimeExceeded) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *TimeExceeded) Marshal(proto int) ([]byte, error) { + return marshalMultipartMessageBody(proto, p.Data, p.Extensions) +} + +// parseTimeExceeded parses b as an ICMP time exceeded message body. +func parseTimeExceeded(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &TimeExceeded{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 00000000000..3daa8979e1d --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 (Internationalized Domain Names for +// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and +// RFC 5894. +package idna // import "golang.org/x/net/idna" + +import ( + "strings" + "unicode/utf8" +) + +// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or +// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". +func ToASCII(s string) (string, error) { + if ascii(s) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if !ascii(label) { + a, err := encode(acePrefix, label) + if err != nil { + return "", err + } + labels[i] = a + } + } + return strings.Join(labels, "."), nil +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". 
+func ToUnicode(s string) (string, error) { + if !strings.Contains(s, acePrefix) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if strings.HasPrefix(label, acePrefix) { + u, err := decode(label[len(acePrefix):]) + if err != nil { + return "", err + } + labels[i] = u + } + } + return strings.Join(labels, "."), nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/idna_test.go b/vendor/golang.org/x/net/idna/idna_test.go new file mode 100644 index 00000000000..b1bc6fa225c --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna_test.go @@ -0,0 +1,43 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +import ( + "testing" +) + +var idnaTestCases = [...]struct { + ascii, unicode string +}{ + // Labels. + {"books", "books"}, + {"xn--bcher-kva", "bücher"}, + + // Domains. + {"foo--xn--bar.org", "foo--xn--bar.org"}, + {"golang.org", "golang.org"}, + {"example.xn--p1ai", "example.рф"}, + {"xn--czrw28b.tw", "商業.tw"}, + {"www.xn--mller-kva.de", "www.müller.de"}, +} + +func TestIDNA(t *testing.T) { + for _, tc := range idnaTestCases { + if a, err := ToASCII(tc.unicode); err != nil { + t.Errorf("ToASCII(%q): %v", tc.unicode, err) + } else if a != tc.ascii { + t.Errorf("ToASCII(%q): got %q, want %q", tc.unicode, a, tc.ascii) + } + + if u, err := ToUnicode(tc.ascii); err != nil { + t.Errorf("ToUnicode(%q): %v", tc.ascii, err) + } else if u != tc.unicode { + t.Errorf("ToUnicode(%q): got %q, want %q", tc.ascii, u, tc.unicode) + } + } +} + +// TODO(nigeltao): test errors, once we've specified when ToASCII and ToUnicode +// return errors. diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 00000000000..92e733f6a7b --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,200 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +// decode decodes a string as specified in section 6.2. 
+func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. +func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. 
+func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/idna/punycode_test.go b/vendor/golang.org/x/net/idna/punycode_test.go new file mode 100644 index 00000000000..bfec81decd0 --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode_test.go @@ -0,0 +1,198 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +import ( + "strings" + "testing" +) + +var punycodeTestCases = [...]struct { + s, encoded string +}{ + {"", ""}, + {"-", "--"}, + {"-a", "-a-"}, + {"-a-", "-a--"}, + {"a", "a-"}, + {"a-", "a--"}, + {"a-b", "a-b-"}, + {"books", "books-"}, + {"bücher", "bcher-kva"}, + {"Hello世界", "Hello-ck1hg65u"}, + {"ü", "tda"}, + {"üý", "tdac"}, + + // The test cases below come from RFC 3492 section 7.1 with Errata 3026. + { + // (A) Arabic (Egyptian). + "\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" + + "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F", + "egbpdaj6bu4bxfgehfvwxn", + }, + { + // (B) Chinese (simplified). + "\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587", + "ihqwcrb4cv8a8dqg056pqjye", + }, + { + // (C) Chinese (traditional). + "\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587", + "ihqwctvzc91f659drss3x8bo0yb", + }, + { + // (D) Czech. + "\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" + + "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" + + "\u0065\u0073\u006B\u0079", + "Proprostnemluvesky-uyb24dma41a", + }, + { + // (E) Hebrew. + "\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" + + "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" + + "\u05D1\u05E8\u05D9\u05EA", + "4dbcagdahymbxekheh6e0a7fei0b", + }, + { + // (F) Hindi (Devanagari). + "\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" + + "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" + + "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" + + "\u0939\u0948\u0902", + "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd", + }, + { + // (G) Japanese (kanji and hiragana). + "\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" + + "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B", + "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa", + }, + { + // (H) Korean (Hangul syllables). + "\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" + + "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" + + "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C", + "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" + + "psd879ccm6fea98c", + }, + { + // (I) Russian (Cyrillic). + "\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" + + "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" + + "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" + + "\u0438", + "b1abfaaepdrnnbgefbadotcwatmq2g4l", + }, + { + // (J) Spanish. + "\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" + + "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" + + "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" + + "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" + + "\u0061\u00F1\u006F\u006C", + "PorqunopuedensimplementehablarenEspaol-fmd56a", + }, + { + // (K) Vietnamese. 
+ "\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" + + "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" + + "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" + + "\u0056\u0069\u1EC7\u0074", + "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g", + }, + { + // (L) 3B. + "\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F", + "3B-ww4c5e180e575a65lsy2b", + }, + { + // (M) -with-SUPER-MONKEYS. + "\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" + + "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" + + "\u004F\u004E\u004B\u0045\u0059\u0053", + "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n", + }, + { + // (N) Hello-Another-Way-. + "\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" + + "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" + + "\u305D\u308C\u305E\u308C\u306E\u5834\u6240", + "Hello-Another-Way--fc4qua05auwb3674vfr0b", + }, + { + // (O) 2. + "\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032", + "2-u9tlzr9756bt3uc0v", + }, + { + // (P) MajiKoi5 + "\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" + + "\u308B\u0035\u79D2\u524D", + "MajiKoi5-783gue6qz075azm5e", + }, + { + // (Q) de + "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", + "de-jg4avhby1noc0d", + }, + { + // (R) + "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", + "d9juau41awczczp", + }, + { + // (S) -> $1.00 <- + "\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" + + "\u003C\u002D", + "-> $1.00 <--", + }, +} + +func TestPunycode(t *testing.T) { + for _, tc := range punycodeTestCases { + if got, err := decode(tc.encoded); err != nil { + t.Errorf("decode(%q): %v", tc.encoded, err) + } else if got != tc.s { + t.Errorf("decode(%q): got %q, want %q", tc.encoded, got, tc.s) + } + + if got, err := encode("", tc.s); err != nil { + t.Errorf(`encode("", %q): %v`, tc.s, err) + } else if got != tc.encoded { + t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded) + } + } +} + +var punycodeErrorTestCases = [...]string{ + "decode -", // A sole '-' is invalid. + "decode foo\x00bar", // '\x00' is not in [0-9A-Za-z]. + "decode foo#bar", // '#' is not in [0-9A-Za-z]. + "decode foo\u00A3bar", // '\u00A3' is not in [0-9A-Za-z]. + "decode 9", // "9a" decodes to codepoint \u00A3; "9" is truncated. + "decode 99999a", // "99999a" decodes to codepoint \U0048A3C1, which is > \U0010FFFF. + "decode 9999999999a", // "9999999999a" overflows the int32 calculation. + + "encode " + strings.Repeat("x", 65536) + "\uff00", // int32 overflow. +} + +func TestPunycodeErrors(t *testing.T) { + for _, tc := range punycodeErrorTestCases { + var err error + switch { + case strings.HasPrefix(tc, "decode "): + _, err = decode(tc[7:]) + case strings.HasPrefix(tc, "encode "): + _, err = encode("", tc[7:]) + } + if err == nil { + if len(tc) > 256 { + tc = tc[:100] + "..." + tc[len(tc)-100:] + } + t.Errorf("no error for %s", tc) + } + } +} diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go new file mode 100644 index 00000000000..3438a27c8d8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/const.go @@ -0,0 +1,180 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). 
+package iana // import "golang.org/x/net/internal/iana" + +// Differentiated Services Field Codepoints (DSCP), Updated: 2013-06-25 +const ( + DiffServCS0 = 0x0 // CS0 + DiffServCS1 = 0x20 // CS1 + DiffServCS2 = 0x40 // CS2 + DiffServCS3 = 0x60 // CS3 + DiffServCS4 = 0x80 // CS4 + DiffServCS5 = 0xa0 // CS5 + DiffServCS6 = 0xc0 // CS6 + DiffServCS7 = 0xe0 // CS7 + DiffServAF11 = 0x28 // AF11 + DiffServAF12 = 0x30 // AF12 + DiffServAF13 = 0x38 // AF13 + DiffServAF21 = 0x48 // AF21 + DiffServAF22 = 0x50 // AF22 + DiffServAF23 = 0x58 // AF23 + DiffServAF31 = 0x68 // AF31 + DiffServAF32 = 0x70 // AF32 + DiffServAF33 = 0x78 // AF33 + DiffServAF41 = 0x88 // AF41 + DiffServAF42 = 0x90 // AF42 + DiffServAF43 = 0x98 // AF43 + DiffServEFPHB = 0xb8 // EF PHB + DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT +) + +// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06 +const ( + NotECNTransport = 0x0 // Not-ECT (Not ECN-Capable Transport) + ECNTransport1 = 0x1 // ECT(1) (ECN-Capable Transport(1)) + ECNTransport0 = 0x2 // ECT(0) (ECN-Capable Transport(0)) + CongestionExperienced = 0x3 // CE (Congestion Experienced) +) + +// Protocol Numbers, Updated: 2015-10-06 +const ( + ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number + ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option + ProtocolICMP = 1 // Internet Control Message + ProtocolIGMP = 2 // Internet Group Management + ProtocolGGP = 3 // Gateway-to-Gateway + ProtocolIPv4 = 4 // IPv4 encapsulation + ProtocolST = 5 // Stream + ProtocolTCP = 6 // Transmission Control + ProtocolCBT = 7 // CBT + ProtocolEGP = 8 // Exterior Gateway Protocol + ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP) + ProtocolBBNRCCMON = 10 // BBN RCC Monitoring + ProtocolNVPII = 11 // Network Voice Protocol + ProtocolPUP = 12 // PUP + ProtocolEMCON = 14 // EMCON + ProtocolXNET = 15 // Cross Net Debugger + ProtocolCHAOS = 16 // Chaos + ProtocolUDP = 17 // User Datagram + ProtocolMUX = 18 // Multiplexing + ProtocolDCNMEAS = 19 // DCN Measurement Subsystems + ProtocolHMP = 20 // Host Monitoring + ProtocolPRM = 21 // Packet Radio Measurement + ProtocolXNSIDP = 22 // XEROX NS IDP + ProtocolTRUNK1 = 23 // Trunk-1 + ProtocolTRUNK2 = 24 // Trunk-2 + ProtocolLEAF1 = 25 // Leaf-1 + ProtocolLEAF2 = 26 // Leaf-2 + ProtocolRDP = 27 // Reliable Data Protocol + ProtocolIRTP = 28 // Internet Reliable Transaction + ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4 + ProtocolNETBLT = 30 // Bulk Data Transfer Protocol + ProtocolMFENSP = 31 // MFE Network Services Protocol + ProtocolMERITINP = 32 // MERIT Internodal Protocol + ProtocolDCCP = 33 // Datagram Congestion Control Protocol + Protocol3PC = 34 // Third Party Connect Protocol + ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol + ProtocolXTP = 36 // XTP + ProtocolDDP = 37 // Datagram Delivery Protocol + ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto + ProtocolTPPP = 39 // TP++ Transport Protocol + ProtocolIL = 40 // IL Transport Protocol + ProtocolIPv6 = 41 // IPv6 encapsulation + ProtocolSDRP = 42 // Source Demand Routing Protocol + ProtocolIPv6Route = 43 // Routing Header for IPv6 + ProtocolIPv6Frag = 44 // Fragment Header for IPv6 + ProtocolIDRP = 45 // Inter-Domain Routing Protocol + ProtocolRSVP = 46 // Reservation Protocol + ProtocolGRE = 47 // Generic Routing Encapsulation + ProtocolDSR = 48 // Dynamic Source Routing Protocol + ProtocolBNA = 49 // BNA + ProtocolESP = 50 // Encap Security Payload + ProtocolAH = 51 // Authentication Header + ProtocolINLSP = 52 // Integrated 
Net Layer Security TUBA + ProtocolNARP = 54 // NBMA Address Resolution Protocol + ProtocolMOBILE = 55 // IP Mobility + ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management + ProtocolSKIP = 57 // SKIP + ProtocolIPv6ICMP = 58 // ICMP for IPv6 + ProtocolIPv6NoNxt = 59 // No Next Header for IPv6 + ProtocolIPv6Opts = 60 // Destination Options for IPv6 + ProtocolCFTP = 62 // CFTP + ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK + ProtocolKRYPTOLAN = 65 // Kryptolan + ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol + ProtocolIPPC = 67 // Internet Pluribus Packet Core + ProtocolSATMON = 69 // SATNET Monitoring + ProtocolVISA = 70 // VISA Protocol + ProtocolIPCV = 71 // Internet Packet Core Utility + ProtocolCPNX = 72 // Computer Protocol Network Executive + ProtocolCPHB = 73 // Computer Protocol Heart Beat + ProtocolWSN = 74 // Wang Span Network + ProtocolPVP = 75 // Packet Video Protocol + ProtocolBRSATMON = 76 // Backroom SATNET Monitoring + ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary + ProtocolWBMON = 78 // WIDEBAND Monitoring + ProtocolWBEXPAK = 79 // WIDEBAND EXPAK + ProtocolISOIP = 80 // ISO Internet Protocol + ProtocolVMTP = 81 // VMTP + ProtocolSECUREVMTP = 82 // SECURE-VMTP + ProtocolVINES = 83 // VINES + ProtocolTTP = 84 // Transaction Transport Protocol + ProtocolIPTM = 84 // Internet Protocol Traffic Manager + ProtocolNSFNETIGP = 85 // NSFNET-IGP + ProtocolDGP = 86 // Dissimilar Gateway Protocol + ProtocolTCF = 87 // TCF + ProtocolEIGRP = 88 // EIGRP + ProtocolOSPFIGP = 89 // OSPFIGP + ProtocolSpriteRPC = 90 // Sprite RPC Protocol + ProtocolLARP = 91 // Locus Address Resolution Protocol + ProtocolMTP = 92 // Multicast Transport Protocol + ProtocolAX25 = 93 // AX.25 Frames + ProtocolIPIP = 94 // IP-within-IP Encapsulation Protocol + ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro. 
+ ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation + ProtocolENCAP = 98 // Encapsulation Header + ProtocolGMTP = 100 // GMTP + ProtocolIFMP = 101 // Ipsilon Flow Management Protocol + ProtocolPNNI = 102 // PNNI over IP + ProtocolPIM = 103 // Protocol Independent Multicast + ProtocolARIS = 104 // ARIS + ProtocolSCPS = 105 // SCPS + ProtocolQNX = 106 // QNX + ProtocolAN = 107 // Active Networks + ProtocolIPComp = 108 // IP Payload Compression Protocol + ProtocolSNP = 109 // Sitara Networks Protocol + ProtocolCompaqPeer = 110 // Compaq Peer Protocol + ProtocolIPXinIP = 111 // IPX in IP + ProtocolVRRP = 112 // Virtual Router Redundancy Protocol + ProtocolPGM = 113 // PGM Reliable Transport Protocol + ProtocolL2TP = 115 // Layer Two Tunneling Protocol + ProtocolDDX = 116 // D-II Data Exchange (DDX) + ProtocolIATP = 117 // Interactive Agent Transfer Protocol + ProtocolSTP = 118 // Schedule Transfer Protocol + ProtocolSRP = 119 // SpectraLink Radio Protocol + ProtocolUTI = 120 // UTI + ProtocolSMP = 121 // Simple Message Protocol + ProtocolPTP = 123 // Performance Transparency Protocol + ProtocolISIS = 124 // ISIS over IPv4 + ProtocolFIRE = 125 // FIRE + ProtocolCRTP = 126 // Combat Radio Transport Protocol + ProtocolCRUDP = 127 // Combat Radio User Datagram + ProtocolSSCOPMCE = 128 // SSCOPMCE + ProtocolIPLT = 129 // IPLT + ProtocolSPS = 130 // Secure Packet Shield + ProtocolPIPE = 131 // Private IP Encapsulation within IP + ProtocolSCTP = 132 // Stream Control Transmission Protocol + ProtocolFC = 133 // Fibre Channel + ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE + ProtocolMobilityHeader = 135 // Mobility Header + ProtocolUDPLite = 136 // UDPLite + ProtocolMPLSinIP = 137 // MPLS-in-IP + ProtocolMANET = 138 // MANET Protocols + ProtocolHIP = 139 // Host Identity Protocol + ProtocolShim6 = 140 // Shim6 Protocol + ProtocolWESP = 141 // Wrapped Encapsulating Security Payload + ProtocolROHC = 142 // Robust Header Compression + ProtocolReserved = 255 // Reserved +) diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go new file mode 100644 index 00000000000..2d8c07ca1bc --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/gen.go @@ -0,0 +1,293 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates internet protocol constants and tables by +// reading IANA protocol registries. 
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" +) + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "http://www.iana.org/assignments/dscp-registry/dscp-registry.xml", + parseDSCPRegistry, + }, + { + "http://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml", + parseTOSTCByte, + }, + { + "http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", + parseProtocolNumbers, + }, +} + +func main() { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") + fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) + os.Exit(1) + } + if err := r.parse(&bb, resp.Body); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile("const.go", b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func parseDSCPRegistry(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var dr dscpRegistry + if err := dec.Decode(&dr); err != nil { + return err + } + drs := dr.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) + fmt.Fprintf(w, "const (\n") + for _, dr := range drs { + fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value) + fmt.Fprintf(w, "// %s\n", dr.OrigName) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type dscpRegistry struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + RegTitle string `xml:"registry>title"` + PoolRecords []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>record"` + Records []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>registry>record"` +} + +type canonDSCPRecord struct { + OrigName string + Name string + Value int +} + +func (drr *dscpRegistry) escape() []canonDSCPRecord { + drs := make([]canonDSCPRecord, len(drr.Records)) + sr := strings.NewReplacer( + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, dr := range drr.Records { + s := strings.TrimSpace(dr.Name) + drs[i].OrigName = s + drs[i].Name = sr.Replace(s) + n, err := strconv.ParseUint(dr.Space, 2, 8) + if err != nil { + continue + } + drs[i].Value = int(n) << 2 + } + return drs +} + +func parseTOSTCByte(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var ttb tosTCByte + if err := dec.Decode(&ttb); err != nil { + return err + } + trs := ttb.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated) + fmt.Fprintf(w, "const (\n") + for _, tr := range trs { + fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value) + fmt.Fprintf(w, "// %s\n", tr.OrigKeyword) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type tosTCByte struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` 
+ RegTitle string `xml:"registry>title"` + Records []struct { + Binary string `xml:"binary"` + Keyword string `xml:"keyword"` + } `xml:"registry>record"` +} + +type canonTOSTCByteRecord struct { + OrigKeyword string + Keyword string + Value int +} + +func (ttb *tosTCByte) escape() []canonTOSTCByteRecord { + trs := make([]canonTOSTCByteRecord, len(ttb.Records)) + sr := strings.NewReplacer( + "Capable", "", + "(", "", + ")", "", + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, tr := range ttb.Records { + s := strings.TrimSpace(tr.Keyword) + trs[i].OrigKeyword = s + ss := strings.Split(s, " ") + if len(ss) > 1 { + trs[i].Keyword = strings.Join(ss[1:], " ") + } else { + trs[i].Keyword = ss[0] + } + trs[i].Keyword = sr.Replace(trs[i].Keyword) + n, err := strconv.ParseUint(tr.Binary, 2, 8) + if err != nil { + continue + } + trs[i].Value = int(n) + } + return trs +} + +func parseProtocolNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var pn protocolNumbers + if err := dec.Decode(&pn); err != nil { + return err + } + prs := pn.escape() + prs = append([]canonProtocolRecord{{ + Name: "IP", + Descr: "IPv4 encapsulation, pseudo protocol number", + Value: 0, + }}, prs...) + fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) + s := pr.Descr + if s == "" { + s = pr.OrigName + } + fmt.Fprintf(w, "// %s\n", s) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type protocolNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonProtocolRecord struct { + OrigName string + Name string + Descr string + Value int +} + +func (pn *protocolNumbers) escape() []canonProtocolRecord { + prs := make([]canonProtocolRecord, len(pn.Records)) + sr := strings.NewReplacer( + "-in-", "in", + "-within-", "within", + "-over-", "over", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range pn.Records { + if strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "deprecated") { + continue + } + prs[i].OrigName = pr.Name + s := strings.TrimSpace(pr.Name) + switch pr.Name { + case "ISIS over IPv4": + prs[i].Name = "ISIS" + case "manet": + prs[i].Name = "MANET" + default: + prs[i].Name = sr.Replace(s) + } + ss := strings.Split(pr.Descr, "\n") + for i := range ss { + ss[i] = strings.TrimSpace(ss[i]) + } + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/internal/nettest/error_posix.go b/vendor/golang.org/x/net/internal/nettest/error_posix.go new file mode 100644 index 00000000000..963ed99655b --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/error_posix.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package nettest + +import ( + "os" + "syscall" +) + +func protocolNotSupported(err error) bool { + switch err := err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + case *os.SyscallError: + switch err := err.Err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + } + } + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/error_stub.go b/vendor/golang.org/x/net/internal/nettest/error_stub.go new file mode 100644 index 00000000000..3c74d812f65 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/error_stub.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package nettest + +func protocolNotSupported(err error) bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/interface.go b/vendor/golang.org/x/net/internal/nettest/interface.go new file mode 100644 index 00000000000..53ae13a987a --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/interface.go @@ -0,0 +1,94 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import "net" + +// IsMulticastCapable reports whether ifi is an IP multicast-capable +// network interface. Network must be "ip", "ip4" or "ip6". +func IsMulticastCapable(network string, ifi *net.Interface) (net.IP, bool) { + switch network { + case "ip", "ip4", "ip6": + default: + return nil, false + } + if ifi == nil || ifi.Flags&net.FlagUp == 0 || ifi.Flags&net.FlagMulticast == 0 { + return nil, false + } + return hasRoutableIP(network, ifi) +} + +// RoutedInterface returns a network interface that can route IP +// traffic and satisfies flags. It returns nil when an appropriate +// network interface is not found. Network must be "ip", "ip4" or +// "ip6". 
+func RoutedInterface(network string, flags net.Flags) *net.Interface { + switch network { + case "ip", "ip4", "ip6": + default: + return nil + } + ift, err := net.Interfaces() + if err != nil { + return nil + } + for _, ifi := range ift { + if ifi.Flags&flags != flags { + continue + } + if _, ok := hasRoutableIP(network, &ifi); !ok { + continue + } + return &ifi + } + return nil +} + +func hasRoutableIP(network string, ifi *net.Interface) (net.IP, bool) { + ifat, err := ifi.Addrs() + if err != nil { + return nil, false + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + case *net.IPNet: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + } + } + return nil, false +} + +func routableIP(network string, ip net.IP) net.IP { + if !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !ip.IsGlobalUnicast() { + return nil + } + switch network { + case "ip4": + if ip := ip.To4(); ip != nil { + return ip + } + case "ip6": + if ip.IsLoopback() { // addressing scope of the loopback address depends on each implementation + return nil + } + if ip := ip.To16(); ip != nil && ip.To4() == nil { + return ip + } + default: + if ip := ip.To4(); ip != nil { + return ip + } + if ip := ip.To16(); ip != nil { + return ip + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit.go b/vendor/golang.org/x/net/internal/nettest/rlimit.go new file mode 100644 index 00000000000..bb34aec0bba --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +const defaultMaxOpenFiles = 256 + +// MaxOpenFiles returns the maximum number of open files for the +// caller's process. +func MaxOpenFiles() int { return maxOpenFiles() } diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit_stub.go b/vendor/golang.org/x/net/internal/nettest/rlimit_stub.go new file mode 100644 index 00000000000..102bef9309b --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit_stub.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package nettest + +func maxOpenFiles() int { return defaultMaxOpenFiles } diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit_unix.go b/vendor/golang.org/x/net/internal/nettest/rlimit_unix.go new file mode 100644 index 00000000000..eb4312ce388 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit_unix.go @@ -0,0 +1,17 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package nettest + +import "syscall" + +func maxOpenFiles() int { + var rlim syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil { + return defaultMaxOpenFiles + } + return int(rlim.Cur) +} diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit_windows.go b/vendor/golang.org/x/net/internal/nettest/rlimit_windows.go new file mode 100644 index 00000000000..de927b56e07 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit_windows.go @@ -0,0 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +func maxOpenFiles() int { return 4 * defaultMaxOpenFiles /* actually it's 16581375 */ } diff --git a/vendor/golang.org/x/net/internal/nettest/stack.go b/vendor/golang.org/x/net/internal/nettest/stack.go new file mode 100644 index 00000000000..e07c015f3fd --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for IP testing. +package nettest // import "golang.org/x/net/internal/nettest" + +import "net" + +// SupportsIPv4 reports whether the platform supports IPv4 networking +// functionality. +func SupportsIPv4() bool { + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err != nil { + return false + } + ln.Close() + return true +} + +// SupportsIPv6 reports whether the platform supports IPv6 networking +// functionality. +func SupportsIPv6() bool { + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + return false + } + ln.Close() + return true +} + +// ProtocolNotSupported reports whether err is a protocol not +// supported error. +func ProtocolNotSupported(err error) bool { + return protocolNotSupported(err) +} diff --git a/vendor/golang.org/x/net/internal/nettest/stack_stub.go b/vendor/golang.org/x/net/internal/nettest/stack_stub.go new file mode 100644 index 00000000000..1b5fde1a38a --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack_stub.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package nettest + +import ( + "fmt" + "runtime" +) + +// SupportsRawIPSocket reports whether the platform supports raw IP +// sockets. +func SupportsRawIPSocket() (string, bool) { + return fmt.Sprintf("not supported on %s", runtime.GOOS), false +} diff --git a/vendor/golang.org/x/net/internal/nettest/stack_unix.go b/vendor/golang.org/x/net/internal/nettest/stack_unix.go new file mode 100644 index 00000000000..af89229f48a --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack_unix.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package nettest + +import ( + "fmt" + "os" + "runtime" +) + +// SupportsRawIPSocket reports whether the platform supports raw IP +// sockets. 
+func SupportsRawIPSocket() (string, bool) { + if os.Getuid() != 0 { + return fmt.Sprintf("must be root on %s", runtime.GOOS), false + } + return "", true +} diff --git a/vendor/golang.org/x/net/internal/nettest/stack_windows.go b/vendor/golang.org/x/net/internal/nettest/stack_windows.go new file mode 100644 index 00000000000..a21f4993e72 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack_windows.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import ( + "fmt" + "runtime" + "syscall" +) + +// SupportsRawIPSocket reports whether the platform supports raw IP +// sockets. +func SupportsRawIPSocket() (string, bool) { + // From http://msdn.microsoft.com/en-us/library/windows/desktop/ms740548.aspx: + // Note: To use a socket of type SOCK_RAW requires administrative privileges. + // Users running Winsock applications that use raw sockets must be a member of + // the Administrators group on the local computer, otherwise raw socket calls + // will fail with an error code of WSAEACCES. On Windows Vista and later, access + // for raw sockets is enforced at socket creation. In earlier versions of Windows, + // access for raw sockets is enforced during other socket operations. + s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, 0) + if err == syscall.WSAEACCES { + return fmt.Sprintf("no access to raw socket allowed on %s", runtime.GOOS), false + } + if err != nil { + return err.Error(), false + } + syscall.Closesocket(s) + return "", true +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go new file mode 100644 index 00000000000..66325a912a8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package timeseries + +import ( + "math" + "testing" + "time" +) + +func isNear(x *Float, y float64, tolerance float64) bool { + return math.Abs(x.Value()-y) < tolerance +} + +func isApproximate(x *Float, y float64) bool { + return isNear(x, y, 1e-2) +} + +func checkApproximate(t *testing.T, o Observable, y float64) { + x := o.(*Float) + if !isApproximate(x, y) { + t.Errorf("Wanted %g, got %g", y, x.Value()) + } +} + +func checkNear(t *testing.T, o Observable, y, tolerance float64) { + x := o.(*Float) + if !isNear(x, y, tolerance) { + t.Errorf("Wanted %g +- %g, got %g", y, tolerance, x.Value()) + } +} + +var baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC) + +func tu(s int64) time.Time { + return baseTime.Add(time.Duration(s) * time.Second) +} + +func tu2(s int64, ns int64) time.Time { + return baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond) +} + +func TestBasicTimeSeries(t *testing.T) { + ts := NewTimeSeries(NewFloat) + fo := new(Float) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(1)), 40) + checkApproximate(t, ts.Total(), 40) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 40) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 70) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 60) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 90) + *fo = Float(100) + ts.AddWithTime(fo, tu(100)) + checkApproximate(t, ts.Range(tu(99), tu(100)), 100) + checkApproximate(t, ts.Range(tu(0), tu(4)), 36) + checkApproximate(t, ts.Total(), 190) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(4)), 44) + checkApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Total(), 210) + + for i, l := range ts.ComputeRange(tu(36), tu(100), 64) { + if i == 63 { + checkApproximate(t, l, 100) + } else { + checkApproximate(t, l, 0) + } + } + + checkApproximate(t, ts.Range(tu(0), tu(100)), 210) + checkApproximate(t, ts.Range(tu(10), tu(100)), 100) + + for i, l := range ts.ComputeRange(tu(0), tu(100), 100) { + if i < 10 { + checkApproximate(t, l, 11) + } else if i >= 90 { + checkApproximate(t, l, 10) + } else { + checkApproximate(t, l, 0) + } + } +} + +func TestFloat(t *testing.T) { + f := Float(1) + if g, w := f.String(), "1"; g != w { + t.Errorf("Float(1).String = %q; want %q", g, w) + } + f2 := Float(2) + var o Observable = &f2 + f.Add(o) + if g, w := f.Value(), 3.0; g != w { + t.Errorf("Float post-add = %v; want %v", g, w) + } + f.Multiply(2) + if g, w := f.Value(), 6.0; g != w { + t.Errorf("Float post-multiply = %v; want %v", g, w) + } + f.Clear() + if g, w := f.Value(), 0.0; g != w { + t.Errorf("Float post-clear = %v; want %v", g, w) + } + f.CopyFrom(&f2) + if g, w := f.Value(), 2.0; g != w { + t.Errorf("Float post-CopyFrom = %v; want %v", g, w) + } +} + +type mockClock struct { + time time.Time +} + +func (m *mockClock) Time() time.Time { return m.time } +func (m *mockClock) Set(t time.Time) { m.time = t } + +const buckets = 6 + +var testResolutions = []time.Duration{ + 10 * time.Second, // level holds one minute of observations + 100 * 
time.Second, // level holds ten minutes of observations + 10 * time.Minute, // level holds one hour of observations +} + +// TestTimeSeries uses a small number of buckets to force a higher +// error rate on approximations from the timeseries. +type TestTimeSeries struct { + timeSeries +} + +func TestExpectedErrorRate(t *testing.T) { + ts := new(TestTimeSeries) + fake := new(mockClock) + fake.Set(time.Now()) + ts.timeSeries.init(testResolutions, NewFloat, buckets, fake) + for i := 1; i <= 61*61; i++ { + fake.Set(fake.Time().Add(1 * time.Second)) + ob := Float(1) + ts.AddWithTime(&ob, fake.Time()) + + // The results should be accurate within one missing bucket (1/6) of the observations recorded. + checkNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10) + checkNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100) + checkNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600) + } +} + +func min(a, b float64) float64 { + if a < b { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/ipv4/bpf_test.go b/vendor/golang.org/x/net/ipv4/bpf_test.go new file mode 100644 index 00000000000..b44da90549a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/bpf_test.go @@ -0,0 +1,93 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + + l, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv4.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp4", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/bpfopt_linux.go b/vendor/golang.org/x/net/ipv4/bpfopt_linux.go new file mode 100644 index 00000000000..f2d00b4cbd5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/bpfopt_linux.go @@ -0,0 +1,27 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "os" + "unsafe" + + "golang.org/x/net/bpf" +) + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + fd, err := c.sysfd() + if err != nil { + return err + } + prog := sysSockFProg{ + Len: uint16(len(filter)), + Filter: (*sysSockFilter)(unsafe.Pointer(&filter[0])), + } + return os.NewSyscallError("setsockopt", setsockopt(fd, sysSOL_SOCKET, sysSO_ATTACH_FILTER, unsafe.Pointer(&prog), uint32(unsafe.Sizeof(prog)))) +} diff --git a/vendor/golang.org/x/net/ipv4/bpfopt_stub.go b/vendor/golang.org/x/net/ipv4/bpfopt_stub.go new file mode 100644 index 00000000000..c4a8481f087 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/bpfopt_stub.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +import "golang.org/x/net/bpf" + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/control.go b/vendor/golang.org/x/net/ipv4/control.go new file mode 100644 index 00000000000..8cadfd7f3ff --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control.go @@ -0,0 +1,70 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "fmt" + "net" + "sync" +) + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +type ControlFlags uint + +const ( + FlagTTL ControlFlags = 1 << iota // pass the TTL on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet +) + +// A ControlMessage represents per packet basis IP-level socket options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn or RawConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn or RawConn allows to send the options + // to the protocol stack. + // + TTL int // time-to-live, receiving only + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) +} + +// Ancillary data socket options +const ( + ctlTTL = iota // header field + ctlSrc // header field + ctlDst // header field + ctlInterface // inbound or outbound interface + ctlPacketInfo // inbound or outbound packet path + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. 
+type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go new file mode 100644 index 00000000000..33d8bc8b381 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func marshalDst(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIP + m.Type = sysIP_RECVDSTADDR + m.SetLen(syscall.CmsgLen(net.IPv4len)) + return b[syscall.CmsgSpace(net.IPv4len):] +} + +func parseDst(cm *ControlMessage, b []byte) { + cm.Dst = b[:net.IPv4len] +} + +func marshalInterface(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIP + m.Type = sysIP_RECVIF + m.SetLen(syscall.CmsgLen(syscall.SizeofSockaddrDatalink)) + return b[syscall.CmsgSpace(syscall.SizeofSockaddrDatalink):] +} + +func parseInterface(cm *ControlMessage, b []byte) { + sadl := (*syscall.SockaddrDatalink)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(sadl.Index) +} diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go new file mode 100644 index 00000000000..444782f3979 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin linux + +package ipv4 + +import ( + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIP + m.Type = sysIP_PKTINFO + m.SetLen(syscall.CmsgLen(sysSizeofInetPktinfo)) + if cm != nil { + pi := (*sysInetPktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + if ip := cm.Src.To4(); ip != nil { + copy(pi.Spec_dst[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return b[syscall.CmsgSpace(sysSizeofInetPktinfo):] +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*sysInetPktinfo)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(pi.Ifindex) + cm.Dst = pi.Addr[:] +} diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go new file mode 100644 index 00000000000..4d8507194c3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 solaris + +package ipv4 + +func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} + +func newControlMessage(opt *rawOpt) []byte { + return nil +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + return nil, errOpNoSupport +} + +func marshalControlMessage(cm *ControlMessage) []byte { + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go new file mode 100644 index 00000000000..3000c52e408 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -0,0 +1,164 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv4 + +import ( + "os" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if cf&FlagTTL != 0 && sockOpts[ssoReceiveTTL].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceiveTTL], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTTL) + } else { + opt.clear(FlagTTL) + } + } + if sockOpts[ssoPacketInfo].name > 0 { + if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { + if err := setInt(fd, &sockOpts[ssoPacketInfo], boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & (FlagSrc | FlagDst | FlagInterface)) + } else { + opt.clear(cf & (FlagSrc | FlagDst | FlagInterface)) + } + } + } else { + if cf&FlagDst != 0 && sockOpts[ssoReceiveDst].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceiveDst], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagDst) + } else { + opt.clear(FlagDst) + } + } + if cf&FlagInterface != 0 && sockOpts[ssoReceiveInterface].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceiveInterface], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagInterface) + } else { + opt.clear(FlagInterface) + } + } + } + return nil +} + +func newControlMessage(opt *rawOpt) (oob []byte) { + opt.RLock() + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlInterface].length) + } + } + if l > 0 { + oob = make([]byte, l) + b := oob + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + b = ctlOpts[ctlTTL].marshal(b, nil) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + b = ctlOpts[ctlPacketInfo].marshal(b, nil) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + b = ctlOpts[ctlDst].marshal(b, nil) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + b = ctlOpts[ctlInterface].marshal(b, nil) + } + } + } + opt.RUnlock() + return +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + if len(b) == 0 { + return nil, nil + } + cmsgs, err := syscall.ParseSocketControlMessage(b) + if err != nil { + return nil, os.NewSyscallError("parse socket control message", err) + } + cm := &ControlMessage{} + for _, m := range cmsgs { + if 
m.Header.Level != iana.ProtocolIP { + continue + } + switch int(m.Header.Type) { + case ctlOpts[ctlTTL].name: + ctlOpts[ctlTTL].parse(cm, m.Data[:]) + case ctlOpts[ctlDst].name: + ctlOpts[ctlDst].parse(cm, m.Data[:]) + case ctlOpts[ctlInterface].name: + ctlOpts[ctlInterface].parse(cm, m.Data[:]) + case ctlOpts[ctlPacketInfo].name: + ctlOpts[ctlPacketInfo].parse(cm, m.Data[:]) + } + } + return cm, nil +} + +func marshalControlMessage(cm *ControlMessage) (oob []byte) { + if cm == nil { + return nil + } + var l int + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + pktinfo = true + l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) + } + if l > 0 { + oob = make([]byte, l) + b := oob + if pktinfo { + b = ctlOpts[ctlPacketInfo].marshal(b, cm) + } + } + return +} + +func marshalTTL(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIP + m.Type = sysIP_RECVTTL + m.SetLen(syscall.CmsgLen(1)) + return b[syscall.CmsgSpace(1):] +} + +func parseTTL(cm *ControlMessage, b []byte) { + cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0]))) +} diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/golang.org/x/net/ipv4/control_windows.go new file mode 100644 index 00000000000..800f6377952 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_windows.go @@ -0,0 +1,27 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "syscall" + +func setControlMessage(fd syscall.Handle, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} + +func newControlMessage(opt *rawOpt) []byte { + // TODO(mikio): implement this + return nil +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + // TODO(mikio): implement this + return nil, syscall.EWINDOWS +} + +func marshalControlMessage(cm *ControlMessage) []byte { + // TODO(mikio): implement this + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go new file mode 100644 index 00000000000..731d56a711d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_darwin.go @@ -0,0 +1,77 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_STRIPHDR = C.IP_STRIPHDR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq + sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn + sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sysSizeofGroupReq = C.sizeof_struct_group_req + sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sysSockaddrStorage C.struct_sockaddr_storage + +type sysSockaddrInet C.struct_sockaddr_in + +type sysInetPktinfo C.struct_in_pktinfo + +type sysIPMreq C.struct_ip_mreq + +type sysIPMreqn C.struct_ip_mreqn + +type sysIPMreqSource C.struct_ip_mreq_source + +type sysGroupReq C.struct_group_req + +type sysGroupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go new file mode 100644 index 00000000000..08e3b855d5e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type sysIPMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go new file mode 100644 index 00000000000..f12ca327bf6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_freebsd.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_SENDSRCADDR = C.IP_SENDSRCADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_ONESBCAST = C.IP_ONESBCAST + sysIP_BINDANY = C.IP_BINDANY + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_MINTTL = C.IP_MINTTL + sysIP_DONTFRAG = C.IP_DONTFRAG + sysIP_RECVTOS = C.IP_RECVTOS + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq + sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn + sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sysSizeofGroupReq = C.sizeof_struct_group_req + sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sysSockaddrStorage C.struct_sockaddr_storage + +type sysSockaddrInet C.struct_sockaddr_in + +type sysIPMreq C.struct_ip_mreq + +type sysIPMreqn C.struct_ip_mreqn + +type sysIPMreqSource C.struct_ip_mreq_source + +type sysGroupReq C.struct_group_req + +type sysGroupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go new file mode 100644 index 00000000000..c4042eb60ae --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_linux.go @@ -0,0 +1,120 @@ +// 
Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_PKTOPTIONS = C.IP_PKTOPTIONS + sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER + sysIP_RECVERR = C.IP_RECVERR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_RECVTOS = C.IP_RECVTOS + sysIP_MTU = C.IP_MTU + sysIP_FREEBIND = C.IP_FREEBIND + sysIP_TRANSPARENT = C.IP_TRANSPARENT + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR + sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR + sysIP_MINTTL = C.IP_MINTTL + sysIP_NODEFRAG = C.IP_NODEFRAG + sysIP_UNICAST_IF = C.IP_UNICAST_IF + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_MSFILTER = C.IP_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL + + //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT + //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT + //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO + //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE + //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE + //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT + + sysICMP_FILTER = C.ICMP_FILTER + + sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE + sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL + sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP + sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6 + sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS + sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING + + sysSOL_SOCKET = C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sysSizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sysSizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo + sysSizeofSockExtendedErr = C.sizeof_struct_sock_extended_err + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq + sysSizeofIPMreqn = C.sizeof_struct_ip_mreqn + sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sysSizeofGroupReq = C.sizeof_struct_group_req + sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sysSizeofICMPFilter = C.sizeof_struct_icmp_filter +) + +type sysKernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sysSockaddrInet C.struct_sockaddr_in + +type sysInetPktinfo C.struct_in_pktinfo + +type sysSockExtendedErr C.struct_sock_extended_err + +type sysIPMreq C.struct_ip_mreq + +type sysIPMreqn C.struct_ip_mreqn + +type sysIPMreqSource C.struct_ip_mreq_source + +type sysGroupReq C.struct_group_req + +type 
sysGroupSourceReq C.struct_group_source_req + +type sysICMPFilter C.struct_icmp_filter + +type sysSockFProg C.struct_sock_fprog + +type sysSockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go new file mode 100644 index 00000000000..8642354f490 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_netbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type sysIPMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go new file mode 100644 index 00000000000..8642354f490 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_openbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type sysIPMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go new file mode 100644 index 00000000000..bb74afa49bd --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVSLLA = C.IP_RECVSLLA + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_NEXTHOP = C.IP_NEXTHOP + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + sysIP_DONTFRAG = C.IP_DONTFRAG + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC + sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL + sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + + sysSizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sysSizeofIPMreq = C.sizeof_struct_ip_mreq + sysSizeofIPMreqSource = C.sizeof_struct_ip_mreq_source +) + +type sysInetPktinfo C.struct_in_pktinfo + +type sysIPMreq C.struct_ip_mreq + +type sysIPMreqSource C.struct_ip_mreq_source diff --git a/vendor/golang.org/x/net/ipv4/dgramopt_posix.go b/vendor/golang.org/x/net/ipv4/dgramopt_posix.go new file mode 100644 index 00000000000..103c4f6daeb --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/dgramopt_posix.go @@ -0,0 +1,251 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// MulticastTTL returns the time-to-live field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastTTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoMulticastTTL]) +} + +// SetMulticastTTL sets the time-to-live field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoMulticastTTL], ttl) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return nil, err + } + return getInterface(fd, &sockOpts[ssoMulticastInterface]) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInterface(fd, &sockOpts[ssoMulticastInterface], ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. 
+func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return false, err + } + on, err := getInt(fd, &sockOpts[ssoMulticastLoopback]) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoMulticastLoopback], boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return setGroup(fd, &sockOpts[ssoJoinGroup], ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return setGroup(fd, &sockOpts[ssoLeaveGroup], ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoJoinSourceGroup], ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. 
+func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoBlockSourceGroup], ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src) +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return nil, err + } + return getICMPFilter(fd, &sockOpts[ssoICMPFilter]) +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setICMPFilter(fd, &sockOpts[ssoICMPFilter], f) +} diff --git a/vendor/golang.org/x/net/ipv4/dgramopt_stub.go b/vendor/golang.org/x/net/ipv4/dgramopt_stub.go new file mode 100644 index 00000000000..b74df693100 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/dgramopt_stub.go @@ -0,0 +1,106 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv4 + +import "net" + +// MulticastTTL returns the time-to-live field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastTTL() (int, error) { + return 0, errOpNoSupport +} + +// SetMulticastTTL sets the time-to-live field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastTTL(ttl int) error { + return errOpNoSupport +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + return nil, errOpNoSupport +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + return errOpNoSupport +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + return false, errOpNoSupport +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + return errOpNoSupport +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. 
+// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + return errOpNoSupport +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + return errOpNoSupport +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go new file mode 100644 index 00000000000..9a79badfe24 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -0,0 +1,242 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv4 implements IP-level socket options for the Internet +// Protocol version 4. +// +// The package provides IP-level socket options that allow +// manipulation of IPv4 facilities. +// +// The IPv4 protocol and basic host requirements for IPv4 are defined +// in RFC 791 and RFC 1122. +// Host extensions for multicasting and socket interface extensions +// for multicast source filters are defined in RFC 1112 and RFC 3678. +// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC +// 3376. +// Source-specific multicast is defined in RFC 4607. +// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv4 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, ipv4.Conn is used to set the type-of-service field on +// the IPv4 header for each packet. 
+// +// ln, err := net.Listen("tcp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPconn which are created as network connections that use the +// IPv4 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.IPv4(224, 0, 0, 250) +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv4 and Ethernet. +// +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of ipv4.PacketConn is used to enable control +// message transmissons. +// +// if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, cm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if cm.Dst.IsMulticast() { +// if cm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTOS(0x0) +// p.SetTTL(16) +// if _, err := p.WriteTo(data, nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// for _, ifi := range []*net.Interface{en0, en1} { +// if err := p.SetMulticastInterface(ifi); err != nil { +// // error handling +// } +// p.SetMulticastTTL(2) +// if _, err := p.WriteTo(data, nil, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn or RawConn may join multiple +// multicast groups. 
For example, a UDP listener with port 1024 might +// join two different groups across over two different network +// interfaces by using: +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv4.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// p2 := ipv4.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn or RawConn on IGMPv3 supported +// platform is able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)} +// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}) +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on IGMPv3 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// IGMPv1 or IGMPv2 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. 
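A minimal consumer-side sketch of the "include" mode described above (illustrative only, not part of the vendored doc.go; the interface name "eth0" and the group/source addresses are placeholder assumptions, and, as the documentation notes, the join may fail on platforms without IGMPv3 support):

    package main

    import (
        "log"
        "net"

        "golang.org/x/net/ipv4"
    )

    func main() {
        ifi, err := net.InterfaceByName("eth0") // assumed interface name
        if err != nil {
            log.Fatal(err)
        }
        c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()

        p := ipv4.NewPacketConn(c)
        grp := &net.UDPAddr{IP: net.IPv4(232, 0, 1, 1)} // assumed SSM-range group
        src := &net.UDPAddr{IP: net.IPv4(192, 0, 2, 1)} // assumed permitted source
        // "Include" mode: accept traffic sent to grp only when it originates from src.
        if err := p.JoinSourceSpecificGroup(ifi, grp, src); err != nil {
            log.Fatal(err)
        }
        defer p.LeaveSourceSpecificGroup(ifi, grp, src)
        // Packets are then read with p.ReadFrom, as shown in the examples above.
    }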
+package ipv4 // import "golang.org/x/net/ipv4" diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go new file mode 100644 index 00000000000..bc45bf054ca --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -0,0 +1,187 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "time" +) + +// A Conn represents a network endpoint that uses the IPv4 transport. +// It is used to control basic IP-level socket options such as TOS and +// TTL. +type Conn struct { + genericOpt +} + +type genericOpt struct { + net.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + return &Conn{ + genericOpt: genericOpt{Conn: c}, + } +} + +// A PacketConn represents a packet network endpoint that uses the +// IPv4 transport. It is used to control several IP-level socket +// options including multicasting. It also provides datagram based +// network I/O methods specific to the IPv4 and higher layer protocols +// such as UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + net.PacketConn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil } + +// SetControlMessage sets the per packet IP-level socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + fd, err := c.payloadHandler.sysfd() + if err != nil { + return err + } + return setControlMessage(fd, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + p := &PacketConn{ + genericOpt: genericOpt{Conn: c.(net.Conn)}, + dgramOpt: dgramOpt{PacketConn: c}, + payloadHandler: payloadHandler{PacketConn: c}, + } + if _, ok := c.(*net.IPConn); ok && sockOpts[ssoStripHeader].name > 0 { + if fd, err := p.payloadHandler.sysfd(); err == nil { + setInt(fd, &sockOpts[ssoStripHeader], boolint(true)) + } + } + return p +} + +// A RawConn represents a packet network endpoint that uses the IPv4 +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv4 and higher layer +// protocols that handle IPv4 datagram directly such as OSPF, GRE. 
+type RawConn struct { + genericOpt + dgramOpt + packetHandler +} + +// SetControlMessage sets the per packet IP-level socket options. +func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + fd, err := c.packetHandler.sysfd() + if err != nil { + return err + } + return setControlMessage(fd, &c.packetHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *RawConn) SetDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.c.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *RawConn) SetReadDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.c.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *RawConn) SetWriteDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.c.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *RawConn) Close() error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.c.Close() +} + +// NewRawConn returns a new RawConn using c as its underlying +// transport. +func NewRawConn(c net.PacketConn) (*RawConn, error) { + r := &RawConn{ + genericOpt: genericOpt{Conn: c.(net.Conn)}, + dgramOpt: dgramOpt{PacketConn: c}, + packetHandler: packetHandler{c: c.(*net.IPConn)}, + } + fd, err := r.packetHandler.sysfd() + if err != nil { + return nil, err + } + if err := setInt(fd, &sockOpts[ssoHeaderPrepend], boolint(true)); err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/golang.org/x/net/ipv4/example_test.go b/vendor/golang.org/x/net/ipv4/example_test.go new file mode 100644 index 00000000000..4f5e2f31213 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/example_test.go @@ -0,0 +1,224 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "fmt" + "log" + "net" + "os" + "runtime" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "0.0.0.0:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To4() != nil { + p := ipv4.NewConn(c) + if err := p.SetTOS(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetTTL(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp4", "0.0.0.0:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.IPv4(224, 0, 0, 251)} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { + log.Fatal(err) + } + + b := make([]byte, 1500) + for { + _, cm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !cm.Dst.IsMulticast() || !cm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, nil, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. + + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To4() != nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no A record found") + } + + c, err := net.ListenPacket("ip4:1", "0.0.0.0") // ICMP for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagSrc|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if err := p.SetTTL(i); err != nil { + log.Fatal(err) + } + + // In the real world usually there are several + // multiple traffic-engineered paths for each hop. + // You may need to probe a few times to each hop. 
+ begin := time.Now() + if _, err := p.WriteTo(wb, nil, &dst); err != nil { + log.Fatal(err) + } + if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + log.Fatal(err) + } + n, cm, peer, err := p.ReadFrom(rb) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + fmt.Printf("%v\t*\n", i) + continue + } + log.Fatal(err) + } + rm, err := icmp.ParseMessage(1, rb[:n]) + if err != nil { + log.Fatal(err) + } + rtt := time.Since(begin) + + // In the real world you need to determine whether the + // received message is yours using ControlMessage.Src, + // ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq. + switch rm.Type { + case ipv4.ICMPTypeTimeExceeded: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + case ipv4.ICMPTypeEchoReply: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + return + default: + log.Printf("unknown ICMP message: %+v\n", rm) + } + } +} + +func ExampleRawConn_advertisingOSPFHello() { + c, err := net.ListenPacket("ip4:89", "0.0.0.0") // OSPF for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + log.Fatal(err) + } + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + allSPFRouters := net.IPAddr{IP: net.IPv4(224, 0, 0, 5)} + if err := r.JoinGroup(en0, &allSPFRouters); err != nil { + log.Fatal(err) + } + defer r.LeaveGroup(en0, &allSPFRouters) + + hello := make([]byte, 24) // fake hello data, you need to implement this + ospf := make([]byte, 24) // fake ospf header, you need to implement this + ospf[0] = 2 // version 2 + ospf[1] = 1 // hello packet + ospf = append(ospf, hello...) + iph := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 0xc0, // DSCP CS6 + TotalLen: ipv4.HeaderLen + len(ospf), + TTL: 1, + Protocol: 89, + Dst: allSPFRouters.IP.To4(), + } + + var cm *ipv4.ControlMessage + switch runtime.GOOS { + case "darwin", "linux": + cm = &ipv4.ControlMessage{IfIndex: en0.Index} + default: + if err := r.SetMulticastInterface(en0); err != nil { + log.Fatal(err) + } + } + if err := r.WriteTo(iph, ospf, cm); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go new file mode 100644 index 00000000000..cbe70327b86 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/gen.go @@ -0,0 +1,208 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. 
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + // The ipv4 package still supports go1.2, and so we need to + // take care of additional platforms in go1.3 and above for + // working with go1.2. + switch { + case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris": + b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv4\n"), 1) + case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"): + b = bytes.Replace(b, []byte("package ipv4\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv4\n"), 1) + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", + parseICMPv4Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "package ipv4\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv4Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv4Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigDescr) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv4Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + 
Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv4ParamRecord struct { + OrigDescr string + Descr string + Value int +} + +func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Descr, "Reserved") || + strings.Contains(pr.Descr, "Unassigned") || + strings.Contains(pr.Descr, "Deprecated") || + strings.Contains(pr.Descr, "Experiment") || + strings.Contains(pr.Descr, "experiment") { + continue + } + ss := strings.Split(pr.Descr, "\n") + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + s := strings.TrimSpace(prs[i].Descr) + prs[i].OrigDescr = s + prs[i].Descr = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv4/genericopt_posix.go b/vendor/golang.org/x/net/ipv4/genericopt_posix.go new file mode 100644 index 00000000000..fefa0be36b1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt_posix.go @@ -0,0 +1,59 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd windows + +package ipv4 + +import "syscall" + +// TOS returns the type-of-service field value for outgoing packets. +func (c *genericOpt) TOS() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoTOS]) +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoTOS], tos) +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoTTL]) +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoTTL], ttl) +} diff --git a/vendor/golang.org/x/net/ipv4/genericopt_stub.go b/vendor/golang.org/x/net/ipv4/genericopt_stub.go new file mode 100644 index 00000000000..1817badb171 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt_stub.go @@ -0,0 +1,29 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv4 + +// TOS returns the type-of-service field value for outgoing packets. 
+func (c *genericOpt) TOS() (int, error) { + return 0, errOpNoSupport +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + return errOpNoSupport +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + return 0, errOpNoSupport +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go new file mode 100644 index 00000000000..363d9c21a66 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "encoding/binary" + "fmt" + "net" + "runtime" + "syscall" +) + +const ( + Version = 4 // protocol version + HeaderLen = 20 // header length without extension headers + maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields +) + +type HeaderFlags int + +const ( + MoreFragments HeaderFlags = 1 << iota // more fragments flag + DontFragment // don't fragment flag +) + +// A Header represents an IPv4 header. +type Header struct { + Version int // protocol version + Len int // header length + TOS int // type-of-service + TotalLen int // packet total length + ID int // identification + Flags HeaderFlags // flags + FragOff int // fragment offset + TTL int // time-to-live + Protocol int // next protocol + Checksum int // checksum + Src net.IP // source address + Dst net.IP // destination address + Options []byte // options, extension headers +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst) +} + +// Marshal returns the binary encoding of the IPv4 header h. +func (h *Header) Marshal() ([]byte, error) { + if h == nil { + return nil, syscall.EINVAL + } + if h.Len < HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := HeaderLen + len(h.Options) + b := make([]byte, hdrlen) + b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f)) + b[1] = byte(h.TOS) + flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) + switch runtime.GOOS { + case "darwin", "dragonfly", "freebsd", "netbsd": + nativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + nativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + default: + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + binary.BigEndian.PutUint16(b[4:6], uint16(h.ID)) + b[8] = byte(h.TTL) + b[9] = byte(h.Protocol) + binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum)) + if ip := h.Src.To4(); ip != nil { + copy(b[12:16], ip[:net.IPv4len]) + } + if ip := h.Dst.To4(); ip != nil { + copy(b[16:20], ip[:net.IPv4len]) + } else { + return nil, errMissingAddress + } + if len(h.Options) > 0 { + copy(b[HeaderLen:], h.Options) + } + return b, nil +} + +// ParseHeader parses b as an IPv4 header. 
+func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return nil, errBufferTooShort + } + h := &Header{ + Version: int(b[0] >> 4), + Len: hdrlen, + TOS: int(b[1]), + ID: int(binary.BigEndian.Uint16(b[4:6])), + TTL: int(b[8]), + Protocol: int(b[9]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), + Src: net.IPv4(b[12], b[13], b[14], b[15]), + Dst: net.IPv4(b[16], b[17], b[18], b[19]), + } + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(nativeEndian.Uint16(b[6:8])) + case "freebsd": + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(nativeEndian.Uint16(b[6:8])) + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + if hdrlen-HeaderLen > 0 { + h.Options = make([]byte, hdrlen-HeaderLen) + copy(h.Options, b[HeaderLen:]) + } + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv4/header_test.go b/vendor/golang.org/x/net/ipv4/header_test.go new file mode 100644 index 00000000000..85cb9c48946 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header_test.go @@ -0,0 +1,138 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "bytes" + "encoding/binary" + "net" + "reflect" + "runtime" + "strings" + "testing" +) + +type headerTest struct { + wireHeaderFromKernel [HeaderLen]byte + wireHeaderToKernel [HeaderLen]byte + wireHeaderFromTradBSDKernel [HeaderLen]byte + wireHeaderFromFreeBSD10Kernel [HeaderLen]byte + wireHeaderToTradBSDKernel [HeaderLen]byte + *Header +} + +var headerLittleEndianTest = headerTest{ + // TODO(mikio): Add platform dependent wire header formats when + // we support new platforms. 
+ wireHeaderFromKernel: [HeaderLen]byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToKernel: [HeaderLen]byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromTradBSDKernel: [HeaderLen]byte{ + 0x45, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromFreeBSD10Kernel: [HeaderLen]byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToTradBSDKernel: [HeaderLen]byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + Header: &Header{ + Version: Version, + Len: HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + }, +} + +func TestMarshalHeader(t *testing.T) { + tt := &headerLittleEndianTest + if nativeEndian != binary.LittleEndian { + t.Skip("no test for non-little endian machine yet") + } + + b, err := tt.Header.Marshal() + if err != nil { + t.Fatal(err) + } + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderToTradBSDKernel[:] + case "freebsd": + if freebsdVersion < 1000000 { + wh = tt.wireHeaderToTradBSDKernel[:] + } else { + wh = tt.wireHeaderFromFreeBSD10Kernel[:] + } + default: + wh = tt.wireHeaderToKernel[:] + } + if !bytes.Equal(b, wh) { + t.Fatalf("got %#v; want %#v", b, wh) + } +} + +func TestParseHeader(t *testing.T) { + tt := &headerLittleEndianTest + if nativeEndian != binary.LittleEndian { + t.Skip("no test for big endian machine yet") + } + + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderFromTradBSDKernel[:] + case "freebsd": + if freebsdVersion < 1000000 { + wh = tt.wireHeaderFromTradBSDKernel[:] + } else { + wh = tt.wireHeaderFromFreeBSD10Kernel[:] + } + default: + wh = tt.wireHeaderFromKernel[:] + } + h, err := ParseHeader(wh) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, tt.Header) { + t.Fatalf("got %#v; want %#v", h, tt.Header) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } +} diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go new file mode 100644 index 00000000000..acecfd0d348 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -0,0 +1,59 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "encoding/binary" + "errors" + "net" + "unsafe" +) + +var ( + errMissingAddress = errors.New("missing address") + errMissingHeader = errors.New("missing header") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") + errNoSuchMulticastInterface = errors.New("no such multicast interface") + + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. 
+ freebsdVersion uint32 + + nativeEndian binary.ByteOrder +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = binary.LittleEndian + } else { + nativeEndian = binary.BigEndian + } +} + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP4(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/helper_stub.go b/vendor/golang.org/x/net/ipv4/helper_stub.go new file mode 100644 index 00000000000..dc2120cf2f7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper_stub.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv4 + +func (c *genericOpt) sysfd() (int, error) { + return 0, errOpNoSupport +} + +func (c *dgramOpt) sysfd() (int, error) { + return 0, errOpNoSupport +} + +func (c *payloadHandler) sysfd() (int, error) { + return 0, errOpNoSupport +} + +func (c *packetHandler) sysfd() (int, error) { + return 0, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/helper_unix.go b/vendor/golang.org/x/net/ipv4/helper_unix.go new file mode 100644 index 00000000000..345ca7dc7fe --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper_unix.go @@ -0,0 +1,50 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv4 + +import ( + "net" + "reflect" +) + +func (c *genericOpt) sysfd() (int, error) { + switch p := c.Conn.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + return sysfd(p) + } + return 0, errInvalidConnType +} + +func (c *dgramOpt) sysfd() (int, error) { + switch p := c.PacketConn.(type) { + case *net.UDPConn, *net.IPConn: + return sysfd(p.(net.Conn)) + } + return 0, errInvalidConnType +} + +func (c *payloadHandler) sysfd() (int, error) { + return sysfd(c.PacketConn.(net.Conn)) +} + +func (c *packetHandler) sysfd() (int, error) { + return sysfd(c.c) +} + +func sysfd(c net.Conn) (int, error) { + cv := reflect.ValueOf(c) + switch ce := cv.Elem(); ce.Kind() { + case reflect.Struct: + netfd := ce.FieldByName("conn").FieldByName("fd") + switch fe := netfd.Elem(); fe.Kind() { + case reflect.Struct: + fd := fe.FieldByName("sysfd") + return int(fd.Int()), nil + } + } + return 0, errInvalidConnType +} diff --git a/vendor/golang.org/x/net/ipv4/helper_windows.go b/vendor/golang.org/x/net/ipv4/helper_windows.go new file mode 100644 index 00000000000..322b2a5e433 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper_windows.go @@ -0,0 +1,49 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
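The Header type, Marshal and ParseHeader added above in header.go can be exercised in isolation; a minimal sketch, assuming placeholder addresses and an OSPF-style payload length (the host-byte-order caveats for TotalLen and FragOff on the traditional BSDs, visible in header.go, still apply):

    package main

    import (
        "fmt"
        "log"
        "net"

        "golang.org/x/net/ipv4"
    )

    func main() {
        h := &ipv4.Header{
            Version:  ipv4.Version,
            Len:      ipv4.HeaderLen,
            TOS:      0xc0,                  // DSCP CS6, as in the OSPF example above
            TotalLen: ipv4.HeaderLen + 24,   // header plus an assumed 24-byte payload
            TTL:      1,
            Protocol: 89,                    // OSPF
            Src:      net.IPv4(192, 0, 2, 1), // assumed addresses
            Dst:      net.IPv4(224, 0, 0, 5),
        }
        b, err := h.Marshal()
        if err != nil {
            log.Fatal(err)
        }
        // ParseHeader reverses the encoding; on darwin/dragonfly/netbsd/freebsd the
        // TotalLen and FragOff fields are interpreted in host byte order, per header.go.
        p, err := ipv4.ParseHeader(b)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(p) // uses Header.String()
    }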
+ +package ipv4 + +import ( + "net" + "reflect" + "syscall" +) + +func (c *genericOpt) sysfd() (syscall.Handle, error) { + switch p := c.Conn.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + return sysfd(p) + } + return syscall.InvalidHandle, errInvalidConnType +} + +func (c *dgramOpt) sysfd() (syscall.Handle, error) { + switch p := c.PacketConn.(type) { + case *net.UDPConn, *net.IPConn: + return sysfd(p.(net.Conn)) + } + return syscall.InvalidHandle, errInvalidConnType +} + +func (c *payloadHandler) sysfd() (syscall.Handle, error) { + return sysfd(c.PacketConn.(net.Conn)) +} + +func (c *packetHandler) sysfd() (syscall.Handle, error) { + return sysfd(c.c) +} + +func sysfd(c net.Conn) (syscall.Handle, error) { + cv := reflect.ValueOf(c) + switch ce := cv.Elem(); ce.Kind() { + case reflect.Struct: + netfd := ce.FieldByName("conn").FieldByName("fd") + switch fe := netfd.Elem(); fe.Kind() { + case reflect.Struct: + fd := fe.FieldByName("sysfd") + return syscall.Handle(fd.Uint()), nil + } + } + return syscall.InvalidHandle, errInvalidConnType +} diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go new file mode 100644 index 00000000000..be10c94888a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -0,0 +1,34 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package ipv4 + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +const ( + ICMPTypeEchoReply ICMPType = 0 // Echo Reply + ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable + ICMPTypeRedirect ICMPType = 5 // Redirect + ICMPTypeEcho ICMPType = 8 // Echo + ICMPTypeRouterAdvertisement ICMPType = 9 // Router Advertisement + ICMPTypeRouterSolicitation ICMPType = 10 // Router Solicitation + ICMPTypeTimeExceeded ICMPType = 11 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 12 // Parameter Problem + ICMPTypeTimestamp ICMPType = 13 // Timestamp + ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply + ICMPTypePhoturis ICMPType = 40 // Photuris +) + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +var icmpTypes = map[ICMPType]string{ + 0: "echo reply", + 3: "destination unreachable", + 5: "redirect", + 8: "echo", + 9: "router advertisement", + 10: "router solicitation", + 11: "time exceeded", + 12: "parameter problem", + 13: "timestamp", + 14: "timestamp reply", + 40: "photuris", +} diff --git a/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/golang.org/x/net/ipv4/icmp.go new file mode 100644 index 00000000000..dbd05cff2cf --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/iana" + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv4 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 2460 defines a reasonable role model and it works not +// only for IPv6 but IPv4. A node means a device that implements IP. 
+// A router means a node that forwards IP packets not explicitly +// addressed to itself, and a host means a node that is not a router. +type ICMPFilter struct { + sysICMPFilter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/golang.org/x/net/ipv4/icmp_linux.go new file mode 100644 index 00000000000..c9122533549 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_linux.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +func (f *sysICMPFilter) accept(typ ICMPType) { + f.Data &^= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPFilter) block(typ ICMPType) { + f.Data |= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPFilter) setAll(block bool) { + if block { + f.Data = 1<<32 - 1 + } else { + f.Data = 0 + } +} + +func (f *sysICMPFilter) willBlock(typ ICMPType) bool { + return f.Data&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go new file mode 100644 index 00000000000..9ee9b6a3295 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +const sysSizeofICMPFilter = 0x0 + +type sysICMPFilter struct { +} + +func (f *sysICMPFilter) accept(typ ICMPType) { +} + +func (f *sysICMPFilter) block(typ ICMPType) { +} + +func (f *sysICMPFilter) setAll(block bool) { +} + +func (f *sysICMPFilter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_test.go b/vendor/golang.org/x/net/ipv4/icmp_test.go new file mode 100644 index 00000000000..3324b54df63 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_test.go @@ -0,0 +1,95 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
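A minimal sketch of the ICMPFilter workflow that the following test exercises, assuming a raw ICMP socket (which requires elevated privileges) and noting that ICMPFilter/SetICMPFilter are Linux-only per the dgramopt implementation above:

    package main

    import (
        "log"
        "net"

        "golang.org/x/net/ipv4"
    )

    func main() {
        c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // raw socket; needs root or CAP_NET_RAW
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()

        p := ipv4.NewPacketConn(c)
        var f ipv4.ICMPFilter
        f.SetAll(true)                   // block every ICMP type ...
        f.Accept(ipv4.ICMPTypeEchoReply) // ... except echo replies
        if err := p.SetICMPFilter(&f); err != nil {
            log.Fatal(err) // returns an error on non-Linux platforms
        }
    }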
+ +package ipv4_test + +import ( + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var icmpStringTests = []struct { + in ipv4.ICMPType + out string +}{ + {ipv4.ICMPTypeDestinationUnreachable, "destination unreachable"}, + + {256, ""}, +} + +func TestICMPString(t *testing.T) { + for _, tt := range icmpStringTests { + s := tt.in.String() + if s != tt.out { + t.Errorf("got %s; want %s", s, tt.out) + } + } +} + +func TestICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + var f ipv4.ICMPFilter + for _, toggle := range []bool{false, true} { + f.SetAll(toggle) + for _, typ := range []ipv4.ICMPType{ + ipv4.ICMPTypeDestinationUnreachable, + ipv4.ICMPTypeEchoReply, + ipv4.ICMPTypeTimeExceeded, + ipv4.ICMPTypeParameterProblem, + } { + f.Accept(typ) + if f.WillBlock(typ) { + t.Errorf("ipv4.ICMPFilter.Set(%v, false) failed", typ) + } + f.Block(typ) + if !f.WillBlock(typ) { + t.Errorf("ipv4.ICMPFilter.Set(%v, true) failed", typ) + } + } + } +} + +func TestSetICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + + var f ipv4.ICMPFilter + f.SetAll(true) + f.Accept(ipv4.ICMPTypeEcho) + f.Accept(ipv4.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + kf, err := p.ICMPFilter() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(kf, &f) { + t.Fatalf("got %#v; want %#v", kf, f) + } +} diff --git a/vendor/golang.org/x/net/ipv4/mocktransponder_test.go b/vendor/golang.org/x/net/ipv4/mocktransponder_test.go new file mode 100644 index 00000000000..e55aaee9180 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/mocktransponder_test.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "testing" +) + +func acceptor(t *testing.T, ln net.Listener, done chan<- bool) { + defer func() { done <- true }() + + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + c.Close() +} diff --git a/vendor/golang.org/x/net/ipv4/multicast_test.go b/vendor/golang.org/x/net/ipv4/multicast_test.go new file mode 100644 index 00000000000..d2bcf8533b8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicast_test.go @@ -0,0 +1,330 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {"232.0.1.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp4", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv4.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + p.SetMulticastTTL(i + 1) + if n, err := p.WriteTo(wb, nil, &grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } + } +} + +var packetConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + defer p.Close() + 
if tt.src == nil { + if err := p.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, tt.grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + p.SetMulticastTTL(i + 1) + if n, err := p.WriteTo(wb, nil, tt.grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + if err != nil { + t.Fatal(err) + } + switch { + case m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 + case m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 + default: + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } + } +} + +var rawConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestRawConnReadWriteMulticastICMP(t *testing.T) { + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range rawConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + if tt.src == nil { + if err := r.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer r.LeaveGroup(ifi, tt.grp) + } else { + if err := r.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer r.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := r.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := r.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := r.SetMulticastLoopback(true); err != nil 
{ + t.Fatal(err) + } + if _, err := r.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + Protocol: 1, + Dst: tt.grp.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + r.SetMulticastTTL(i + 1) + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + if rh, b, _, err := r.ReadFrom(rb); err != nil { + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + switch { + case (rh.Dst.IsLoopback() || rh.Dst.IsLinkLocalUnicast() || rh.Dst.IsGlobalUnicast()) && m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 + case rh.Dst.IsMulticast() && m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 + default: + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastlistener_test.go b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go new file mode 100644 index 00000000000..e342bf1d901 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go @@ -0,0 +1,249 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var udpMultipleGroupListenerTests = []net.Addr{ + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, // see RFC 4727 + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}, + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, +} + +func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c, err := net.ListenPacket("udp4", "0.0.0.0:0") // wildcard address with no reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } +} + +func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c1.Close() + + c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + var ps [2]*ipv4.PacketConn + ps[0] = ipv4.NewPacketConn(c1) + ps[1] = ipv4.NewPacketConn(c2) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + for _, p := range ps { + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + for _, p := range ps { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } + } +} + +func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + type ml struct { + c *ipv4.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip4", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("udp4", ip.String()+":"+"1024") // unicast address with non-reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case 
"nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // wildcard address + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := r.LeaveGroup(ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPPerInterfaceSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + type ml struct { + c *ipv4.RawConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip4", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip4:253", ip.String()) // unicast address + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{r, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go new file mode 100644 index 00000000000..c76dbe4def6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go @@ -0,0 +1,195 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp4", "", "224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, nil}, // see RFC 4727 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {"udp4", "", "232.0.0.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 249)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +var rawConnMulticastSocketOptionTests = []struct { + grp, src net.Addr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestRawConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range rawConnMulticastSocketOptionTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, r, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, r, ifi, tt.grp, tt.src) + } + } +} + +type testIPv4MulticastConn interface { + MulticastTTL() (int, error) + SetMulticastTTL(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp net.Addr) { + const ttl = 255 + if err := c.SetMulticastTTL(ttl); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastTTL(); err != nil { + t.Error(err) + return + } else if v != ttl { + t.Errorf("got %v; want %v", v, ttl) + return + } + + for _, toggle := range []bool{true, false} { + if err := 
c.SetMulticastLoopback(toggle); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go new file mode 100644 index 00000000000..09864314e4b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" +) + +// A packetHandler represents the IPv4 datagram handler. +type packetHandler struct { + c *net.IPConn + rawOpt +} + +func (c *packetHandler) ok() bool { return c != nil && c.c != nil } + +// ReadFrom reads an IPv4 datagram from the endpoint c, copying the +// datagram into b. It returns the received datagram as the IPv4 +// header h, the payload p and the control message cm. +func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + if !c.ok() { + return nil, nil, nil, syscall.EINVAL + } + oob := newControlMessage(&c.rawOpt) + n, oobn, _, src, err := c.c.ReadMsgIP(b, oob) + if err != nil { + return nil, nil, nil, err + } + var hs []byte + if hs, p, err = slicePacket(b[:n]); err != nil { + return nil, nil, nil, err + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, err + } + if cm, err = parseControlMessage(oob[:oobn]); err != nil { + return nil, nil, nil, err + } + if src != nil && cm != nil { + cm.Src = src.IP + } + return +} + +func slicePacket(b []byte) (h, p []byte, err error) { + if len(b) < HeaderLen { + return nil, nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + return b[:hdrlen], b[hdrlen:], nil +} + +// WriteTo writes an IPv4 datagram through the endpoint c, copying the +// datagram from the IPv4 header h and the payload p. The control +// message cm allows the datagram path and the outgoing interface to be +// specified. Currently only Darwin and Linux support this. 
The cm +// may be nil if control of the outgoing datagram is not required. +// +// The IPv4 header h must contain appropriate fields that include: +// +// Version = ipv4.Version +// Len = +// TOS = +// TotalLen = +// ID = platform sets an appropriate value if ID is zero +// FragOff = +// TTL = +// Protocol = +// Checksum = platform sets an appropriate value if Checksum is zero +// Src = platform sets an appropriate value if Src is nil +// Dst = +// Options = optional +func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { + if !c.ok() { + return syscall.EINVAL + } + oob := marshalControlMessage(cm) + wh, err := h.Marshal() + if err != nil { + return err + } + dst := &net.IPAddr{} + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + wh = append(wh, p...) + _, _, err = c.c.WriteMsgIP(wh, oob, dst) + return err +} diff --git a/vendor/golang.org/x/net/ipv4/payload.go b/vendor/golang.org/x/net/ipv4/payload.go new file mode 100644 index 00000000000..d7698cbd35f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload.go @@ -0,0 +1,15 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "net" + +// A payloadHandler represents the IPv4 datagram payload handler. +type payloadHandler struct { + net.PacketConn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go new file mode 100644 index 00000000000..d358fc3ac4b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -0,0 +1,81 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,!solaris,!windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + oob := newControlMessage(&c.rawOpt) + var oobn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + if sockOpts[ssoStripHeader].name > 0 { + if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil { + return 0, nil, nil, err + } + } else { + nb := make([]byte, maxHeaderLen+len(b)) + if n, oobn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { + return 0, nil, nil, err + } + hdrlen := int(nb[0]&0x0f) << 2 + copy(b, nb[hdrlen:]) + n -= hdrlen + } + default: + return 0, nil, nil, errInvalidConnType + } + if cm, err = parseControlMessage(oob[:oobn]); err != nil { + return 0, nil, nil, err + } + if cm != nil { + cm.Src = netAddrToIP4(src) + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. 
The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + oob := marshalControlMessage(cm) + if dst == nil { + return 0, errMissingAddress + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, errInvalidConnType + } + if err != nil { + return 0, err + } + return +} diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go new file mode 100644 index 00000000000..d128c9c2e74 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build plan9 solaris windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_test.go b/vendor/golang.org/x/net/ipv4/readwrite_test.go new file mode 100644 index 00000000000..247d06c1a88 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_test.go @@ -0,0 +1,174 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func benchmarkUDPListener() (net.PacketConn, net.Addr, error) { + c, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + return nil, nil, err + } + dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String()) + if err != nil { + c.Close() + return nil, nil, err + } + return c, dst, nil +} + +func BenchmarkReadWriteNetUDP(b *testing.B) { + c, dst, err := benchmarkUDPListener() + if err != nil { + b.Fatal(err) + } + defer c.Close() + + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchmarkReadWriteNetUDP(b, c, wb, rb, dst) + } +} + +func benchmarkReadWriteNetUDP(b *testing.B, c net.PacketConn, wb, rb []byte, dst net.Addr) { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } +} + +func BenchmarkReadWriteIPv4UDP(b *testing.B) { + c, dst, err := benchmarkUDPListener() + if err != nil { + b.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + defer p.Close() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchmarkReadWriteIPv4UDP(b, p, wb, rb, dst, ifi) + } +} + +func benchmarkReadWriteIPv4UDP(b *testing.B, p *ipv4.PacketConn, wb, rb []byte, dst net.Addr, ifi *net.Interface) { + cm := ipv4.ControlMessage{TTL: 1} + if ifi != nil { + cm.IfIndex = ifi.Index + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } else if n != len(wb) { + b.Fatalf("got %v; want %v", n, len(wb)) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("short write: %v", n) + 
return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/golang.org/x/net/ipv4/sockopt.go new file mode 100644 index 00000000000..ace37d30f5f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt.go @@ -0,0 +1,46 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +// Sticky socket options +const ( + ssoTOS = iota // header field for unicast packet + ssoTTL // header field for unicast packet + ssoMulticastTTL // header field for multicast packet + ssoMulticastInterface // outbound interface for multicast packet + ssoMulticastLoopback // loopback for multicast packet + ssoReceiveTTL // header field on received packet + ssoReceiveDst // header field on received packet + ssoReceiveInterface // inbound interface on received packet + ssoPacketInfo // incbound or outbound packet path + ssoHeaderPrepend // ipv4 header prepend + ssoStripHeader // strip ipv4 header + ssoICMPFilter // icmp filter + ssoJoinGroup // any-source multicast + ssoLeaveGroup // any-source multicast + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoMax +) + +// Sticky socket option value types +const ( + ssoTypeByte = iota + 1 + ssoTypeInt + ssoTypeInterface + ssoTypeICMPFilter + ssoTypeIPMreq + ssoTypeIPMreqn + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + name int // option name, must be equal or greater than 1 + typ int // option value type, must be equal or greater than 1 +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go new file mode 100644 index 00000000000..4a6aa78eff5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go @@ -0,0 +1,83 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd windows + +package ipv4 + +import "net" + +func setIPMreqInterface(mreq *sysIPMreq, ifi *net.Interface) error { + if ifi == nil { + return nil + } + ifat, err := ifi.Addrs() + if err != nil { + return err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + } + } + return errNoSuchInterface +} + +func netIP4ToInterface(ip net.IP) (*net.Interface, error) { + ift, err := net.Interfaces() + if err != nil { + return nil, err + } + for _, ifi := range ift { + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + case *net.IPNet: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + } + } + } + return nil, errNoSuchInterface +} + +func netInterfaceToIP4(ifi *net.Interface) (net.IP, error) { + if ifi == nil { + return net.IPv4zero.To4(), nil + } + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + } + } + return nil, errNoSuchInterface +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go new file mode 100644 index 00000000000..45551528b38 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!windows + +package ipv4 + +import "net" + +func setsockoptIPMreq(fd, name int, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func getsockoptInterface(fd, name int) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func setsockoptInterface(fd, name int, ifi *net.Interface) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go new file mode 100644 index 00000000000..7b5c3290d9e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_unix.go @@ -0,0 +1,46 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "os" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func setsockoptIPMreq(fd, name int, ifi *net.Interface, grp net.IP) error { + mreq := sysIPMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreq), sysSizeofIPMreq)) +} + +func getsockoptInterface(fd, name int) (*net.Interface, error) { + var b [4]byte + l := uint32(4) + if err := getsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func setsockoptInterface(fd, name int, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), uint32(4))) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go new file mode 100644 index 00000000000..431930df750 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreq_windows.go @@ -0,0 +1,45 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "os" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func setsockoptIPMreq(fd syscall.Handle, name int, ifi *net.Interface, grp net.IP) error { + mreq := sysIPMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&mreq)), int32(sysSizeofIPMreq))) +} + +func getsockoptInterface(fd syscall.Handle, name int) (*net.Interface, error) { + var b [4]byte + l := int32(4) + if err := syscall.Getsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&b[0])), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func setsockoptInterface(fd syscall.Handle, name int, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(name), (*byte)(unsafe.Pointer(&b[0])), 4)) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go new file mode 100644 index 00000000000..332f403e8ce --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!freebsd,!linux,!windows + +package ipv4 + +import "net" + +func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func setsockoptIPMreqn(fd, name int, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go new file mode 100644 index 00000000000..1f2b9a14b40 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "os" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { + var mreqn sysIPMreqn + l := uint32(sysSizeofIPMreqn) + if err := getsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + if mreqn.Ifindex == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func setsockoptIPMreqn(fd, name int, ifi *net.Interface, grp net.IP) error { + var mreqn sysIPMreqn + if ifi != nil { + mreqn.Ifindex = int32(ifi.Index) + } + if grp != nil { + mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), sysSizeofIPMreqn)) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go new file mode 100644 index 00000000000..85465244767 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux + +package ipv4 + +import "net" + +func setsockoptGroupReq(fd, name int, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func setsockoptGroupSourceReq(fd, name int, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go new file mode 100644 index 00000000000..0a672b6acd9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go @@ -0,0 +1,61 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "os" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +var freebsd32o64 bool + +func setsockoptGroupReq(fd, name int, ifi *net.Interface, grp net.IP) error { + var gr sysGroupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var p unsafe.Pointer + var l uint32 + if freebsd32o64 { + var d [sysSizeofGroupReq + 4]byte + s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = sysSizeofGroupReq + 4 + } else { + p = unsafe.Pointer(&gr) + l = sysSizeofGroupReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, p, l)) +} + +func setsockoptGroupSourceReq(fd, name int, ifi *net.Interface, grp, src net.IP) error { + var gsr sysGroupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var p unsafe.Pointer + var l uint32 + if freebsd32o64 { + var d [sysSizeofGroupSourceReq + 4]byte + s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = sysSizeofGroupSourceReq + 4 + } else { + p = unsafe.Pointer(&gsr) + l = sysSizeofGroupSourceReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, p, l)) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go new file mode 100644 index 00000000000..9d19f5dfed6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -0,0 +1,11 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv4 + +func setInt(fd int, opt *sockOpt, v int) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_unix.go b/vendor/golang.org/x/net/ipv4/sockopt_unix.go new file mode 100644 index 00000000000..f7acc6b9a17 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_unix.go @@ -0,0 +1,122 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv4 + +import ( + "net" + "os" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func getInt(fd int, opt *sockOpt) (int, error) { + if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) { + return 0, errOpNoSupport + } + var i int32 + var b byte + p := unsafe.Pointer(&i) + l := uint32(4) + if opt.typ == ssoTypeByte { + p = unsafe.Pointer(&b) + l = 1 + } + if err := getsockopt(fd, iana.ProtocolIP, opt.name, p, &l); err != nil { + return 0, os.NewSyscallError("getsockopt", err) + } + if opt.typ == ssoTypeByte { + return int(b), nil + } + return int(i), nil +} + +func setInt(fd int, opt *sockOpt, v int) error { + if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) { + return errOpNoSupport + } + i := int32(v) + var b byte + p := unsafe.Pointer(&i) + l := uint32(4) + if opt.typ == ssoTypeByte { + b = byte(v) + p = unsafe.Pointer(&b) + l = 1 + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, opt.name, p, l)) +} + +func getInterface(fd int, opt *sockOpt) (*net.Interface, error) { + if opt.name < 1 { + return nil, errOpNoSupport + } + switch opt.typ { + case ssoTypeInterface: + return getsockoptInterface(fd, opt.name) + case ssoTypeIPMreqn: + return getsockoptIPMreqn(fd, opt.name) + default: + return nil, errOpNoSupport + } +} + +func setInterface(fd int, opt *sockOpt, ifi *net.Interface) error { + if opt.name < 1 { + return errOpNoSupport + } + switch opt.typ { + case ssoTypeInterface: + return setsockoptInterface(fd, opt.name, ifi) + case ssoTypeIPMreqn: + return setsockoptIPMreqn(fd, opt.name, ifi, nil) + default: + return errOpNoSupport + } +} + +func getICMPFilter(fd int, opt *sockOpt) (*ICMPFilter, error) { + if opt.name < 1 || opt.typ != ssoTypeICMPFilter { + return nil, errOpNoSupport + } + var f ICMPFilter + l := uint32(sysSizeofICMPFilter) + if err := getsockopt(fd, iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.sysICMPFilter), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + return &f, nil +} + +func setICMPFilter(fd int, opt *sockOpt, f *ICMPFilter) error { + if opt.name < 1 || opt.typ != ssoTypeICMPFilter { + return errOpNoSupport + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.sysICMPFilter), sysSizeofICMPFilter)) +} + +func setGroup(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + if opt.name < 1 { + return errOpNoSupport + } + switch opt.typ { + case ssoTypeIPMreq: + return setsockoptIPMreq(fd, opt.name, ifi, grp) + case ssoTypeIPMreqn: + return setsockoptIPMreqn(fd, opt.name, ifi, grp) + case ssoTypeGroupReq: + return setsockoptGroupReq(fd, opt.name, ifi, grp) + default: + return errOpNoSupport + } +} + +func setSourceGroup(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq { + return errOpNoSupport + } + return setsockoptGroupSourceReq(fd, opt.name, ifi, grp, src) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_windows.go b/vendor/golang.org/x/net/ipv4/sockopt_windows.go new file mode 100644 index 00000000000..c4c2441ec51 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_windows.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "os" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func getInt(fd syscall.Handle, opt *sockOpt) (int, error) { + if opt.name < 1 || opt.typ != ssoTypeInt { + return 0, errOpNoSupport + } + var i int32 + l := int32(4) + if err := syscall.Getsockopt(fd, iana.ProtocolIP, int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { + return 0, os.NewSyscallError("getsockopt", err) + } + return int(i), nil +} + +func setInt(fd syscall.Handle, opt *sockOpt, v int) error { + if opt.name < 1 || opt.typ != ssoTypeInt { + return errOpNoSupport + } + i := int32(v) + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, iana.ProtocolIP, int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) +} + +func getInterface(fd syscall.Handle, opt *sockOpt) (*net.Interface, error) { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return nil, errOpNoSupport + } + return getsockoptInterface(fd, opt.name) +} + +func setInterface(fd syscall.Handle, opt *sockOpt, ifi *net.Interface) error { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return errOpNoSupport + } + return setsockoptInterface(fd, opt.name, ifi) +} + +func getICMPFilter(fd syscall.Handle, opt *sockOpt) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func setICMPFilter(fd syscall.Handle, opt *sockOpt, f *ICMPFilter) error { + return errOpNoSupport +} + +func setGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + if opt.name < 1 || opt.typ != ssoTypeIPMreq { + return errOpNoSupport + } + return setsockoptIPMreq(fd, opt.name, ifi, grp) +} + +func setSourceGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + // TODO(mikio): implement this + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go new file mode 100644 index 00000000000..203033db0fe --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -0,0 +1,34 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly netbsd + +package ipv4 + +import ( + "net" + "syscall" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, + ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, + ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, + ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, + ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, + ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/golang.org/x/net/ipv4/sys_darwin.go new file mode 100644 index 00000000000..b5f5bd51516 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -0,0 +1,96 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, + ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, + ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, + ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, + ssoStripHeader: {sysIP_STRIPHDR, ssoTypeInt}, + ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, + ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + osver, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + var i int + for i = range osver { + if osver[i] == '.' { + break + } + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11.0.0). But it looks like + // those features require OS X 10.8 (Darwin 12.0.0) and above. + // See http://support.apple.com/kb/HT1633. + if i > 2 || i == 2 && osver[0] >= '1' && osver[1] >= '2' { + ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO + ctlOpts[ctlPacketInfo].length = sysSizeofInetPktinfo + ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo + ctlOpts[ctlPacketInfo].parse = parsePacketInfo + sockOpts[ssoPacketInfo].name = sysIP_RECVPKTINFO + sockOpts[ssoPacketInfo].typ = ssoTypeInt + sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn + sockOpts[ssoJoinGroup].name = sysMCAST_JOIN_GROUP + sockOpts[ssoJoinGroup].typ = ssoTypeGroupReq + sockOpts[ssoLeaveGroup].name = sysMCAST_LEAVE_GROUP + sockOpts[ssoLeaveGroup].typ = ssoTypeGroupReq + sockOpts[ssoJoinSourceGroup].name = sysMCAST_JOIN_SOURCE_GROUP + sockOpts[ssoJoinSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoLeaveSourceGroup].name = sysMCAST_LEAVE_SOURCE_GROUP + sockOpts[ssoLeaveSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoBlockSourceGroup].name = sysMCAST_BLOCK_SOURCE + sockOpts[ssoBlockSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoUnblockSourceGroup].name = sysMCAST_UNBLOCK_SOURCE + sockOpts[ssoUnblockSourceGroup].typ = ssoTypeGroupSourceReq + } +} + +func (pi *sysInetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Pad_cgo_0[0])) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Pad_cgo_0[0])) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Pad_cgo_1[0])) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go new file mode 100644 index 00000000000..163ff9a7742 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -0,0 +1,73 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, + ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, + ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, + ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, + ssoJoinGroup: {sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, + ssoLeaveGroup: {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, + ssoJoinSourceGroup: {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + } +) + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") + if freebsdVersion >= 1000000 { + sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn + } + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sysSizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_linux.go b/vendor/golang.org/x/net/ipv4/sys_linux.go new file mode 100644 index 00000000000..73e0d462384 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sysSizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeInt}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeIPMreqn}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, + ssoPacketInfo: {sysIP_PKTINFO, ssoTypeInt}, + ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, + ssoICMPFilter: {sysICMP_FILTER, ssoTypeICMPFilter}, + ssoJoinGroup: {sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, + ssoLeaveGroup: {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, + ssoJoinSourceGroup: {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + } +) + +func (pi *sysInetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := (*sysSockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_openbsd.go b/vendor/golang.org/x/net/ipv4/sys_openbsd.go new file mode 100644 index 00000000000..d78083a2853 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_openbsd.go @@ -0,0 +1,32 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeByte}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeByte}, + ssoReceiveTTL: {sysIP_RECVTTL, ssoTypeInt}, + ssoReceiveDst: {sysIP_RECVDSTADDR, ssoTypeInt}, + ssoReceiveInterface: {sysIP_RECVIF, ssoTypeInt}, + ssoHeaderPrepend: {sysIP_HDRINCL, ssoTypeInt}, + ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, + ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go new file mode 100644 index 00000000000..c8e55cbc80c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 solaris + +package ipv4 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = [ssoMax]sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/golang.org/x/net/ipv4/sys_windows.go new file mode 100644 index 00000000000..466489fe06d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -0,0 +1,61 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +const ( + // See ws2tcpip.h. + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_DONTFRAGMENT = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0xf + sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 + sysIP_PKTINFO = 0x13 + + sysSizeofInetPktinfo = 0x8 + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqSource = 0xc +) + +type sysInetPktinfo struct { + Addr [4]byte + Ifindex int32 +} + +type sysIPMreq struct { + Multiaddr [4]byte + Interface [4]byte +} + +type sysIPMreqSource struct { + Multiaddr [4]byte + Sourceaddr [4]byte + Interface [4]byte +} + +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = [ssoMax]sockOpt{ + ssoTOS: {sysIP_TOS, ssoTypeInt}, + ssoTTL: {sysIP_TTL, ssoTypeInt}, + ssoMulticastTTL: {sysIP_MULTICAST_TTL, ssoTypeInt}, + ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastLoopback: {sysIP_MULTICAST_LOOP, ssoTypeInt}, + ssoJoinGroup: {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq}, + ssoLeaveGroup: {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq}, + } +) + +func (pi *sysInetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} diff --git a/vendor/golang.org/x/net/ipv4/syscall_linux_386.go b/vendor/golang.org/x/net/ipv4/syscall_linux_386.go new file mode 100644 index 00000000000..07a3a282214 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/syscall_linux_386.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "syscall" + "unsafe" +) + +const ( + sysGETSOCKOPT = 0xf + sysSETSOCKOPT = 0xe +) + +func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) + +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { + if _, errno := socketcall(sysGETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { + return error(errno) + } + return nil +} + +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { + if _, errno := socketcall(sysSETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/syscall_unix.go b/vendor/golang.org/x/net/ipv4/syscall_unix.go new file mode 100644 index 00000000000..88a41b0c0a6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/syscall_unix.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux,!386 netbsd openbsd + +package ipv4 + +import ( + "syscall" + "unsafe" +) + +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { + if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { + return error(errno) + } + return nil +} + +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { + if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/thunk_linux_386.s b/vendor/golang.org/x/net/ipv4/thunk_linux_386.s new file mode 100644 index 00000000000..daa78bc02dd --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/thunk_linux_386.s @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.2 + +TEXT ·socketcall(SB),4,$0-36 + JMP syscall·socketcall(SB) diff --git a/vendor/golang.org/x/net/ipv4/unicast_test.go b/vendor/golang.org/x/net/ipv4/unicast_test.go new file mode 100644 index 00000000000..9c632cd898f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicast_test.go @@ -0,0 +1,246 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestPacketConnReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveUDPAddr("udp4", c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + p := ipv4.NewPacketConn(c) + defer p.Close() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } +} + +func TestPacketConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil 
{ + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + p := ipv4.NewPacketConn(c) + defer p.Close() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + loop: + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + if err != nil { + t.Fatal(err) + } + if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. + goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} + +func TestRawConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + TTL: i + 1, + Protocol: 1, + Dst: dst.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + loop: + if err := r.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if _, b, _, 
err := r.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. + goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go new file mode 100644 index 00000000000..25606f21da3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go @@ -0,0 +1,139 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go acceptor(t, ln, done) + + c, err := net.Dial("tcp4", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewConn(c)) + + <-done +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp4", "", "127.0.0.1:0"}, + {"ip4", ":icmp", "127.0.0.1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewPacketConn(c)) + } +} + +func TestRawConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + + testUnicastSocketOptions(t, r) +} + +type testIPv4UnicastConn interface { + TOS() (int, error) + SetTOS(int) error + TTL() (int, error) + SetTTL(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv4UnicastConn) { + tos := iana.DiffServCS0 | iana.NotECNTransport + switch runtime.GOOS { + case "windows": + // IP_TOS option is 
supported on Windows 8 and beyond. + t.Skipf("not supported on %s", runtime.GOOS) + } + + if err := c.SetTOS(tos); err != nil { + t.Fatal(err) + } + if v, err := c.TOS(); err != nil { + t.Fatal(err) + } else if v != tos { + t.Fatalf("got %v; want %v", v, tos) + } + const ttl = 255 + if err := c.SetTTL(ttl); err != nil { + t.Fatal(err) + } + if v, err := c.TTL(); err != nil { + t.Fatal(err) + } else if v != ttl { + t.Fatalf("got %v; want %v", v, ttl) + } +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go new file mode 100644 index 00000000000..087c6390639 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_STRIPHDR = 0x17 + sysIP_RECVTTL = 0x18 + sysIP_BOUND_IF = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_MULTICAST_IFINDEX = 0x42 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sysInetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go new file mode 100644 index 00000000000..f5c9ccec4fb --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -0,0 +1,33 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +// +build dragonfly + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x41 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + 
sysSizeofIPMreq = 0x8 +) + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go new file mode 100644 index 00000000000..6fd67e1e995 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysGroupReq struct { + Interface uint32 + Group sysSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysSockaddrStorage + Source sysSockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go new file mode 100644 index 00000000000..ebac6d7926d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + 
sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage + Source sysSockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go new file mode 100644 index 00000000000..ebac6d7926d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage + Source sysSockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go new file mode 100644 index 00000000000..3733152a494 --- /dev/null +++ 
b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -0,0 +1,146 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go new file mode 100644 index 00000000000..afa4519067f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + 
sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go new file mode 100644 index 00000000000..3733152a494 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -0,0 +1,146 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 
0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go new file mode 100644 index 00000000000..129a20ac61e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,arm64 + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + 
sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go new file mode 100644 index 00000000000..7ed9368f500 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,mips64 + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + 
sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go new file mode 100644 index 00000000000..19fadae626f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,mips64le + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + 
sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go new file mode 100644 index 00000000000..15426beeeb0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,ppc + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + 
Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go new file mode 100644 index 00000000000..beaadd5f04b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,ppc64 + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* 
in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go new file mode 100644 index 00000000000..0eb2623050b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,ppc64le + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 
+ Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go new file mode 100644 index 00000000000..90fe99ebb06 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,s390x + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet = 0x10 + sysSizeofInetPktinfo = 0xc + sysSizeofSockExtendedErr = 0x10 + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqn = 0xc + sysSizeofIPMreqSource = 0xc + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPFilter = 0x4 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sysInetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysSockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type sysIPMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPFilter struct { + Data uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte 
+ Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go new file mode 100644 index 00000000000..8a440eb65b8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x17 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sysSizeofIPMreq = 0x8 +) + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go new file mode 100644 index 00000000000..fd522b57388 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x1e + sysIP_RECVTTL = 0x1f + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sysSizeofIPMreq = 0x8 +) + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go new file mode 100644 index 00000000000..d7c23349a34 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -0,0 +1,60 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +// +build solaris + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb + sysIP_NEXTHOP = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 + + sysIP_MULTICAST_IF = 0x10 + sysIP_MULTICAST_TTL = 0x11 + sysIP_MULTICAST_LOOP = 0x12 + sysIP_ADD_MEMBERSHIP = 0x13 + sysIP_DROP_MEMBERSHIP = 0x14 + sysIP_BLOCK_SOURCE = 0x15 + sysIP_UNBLOCK_SOURCE = 0x16 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + + sysSizeofInetPktinfo = 0xc + + sysSizeofIPMreq = 0x8 + sysSizeofIPMreqSource = 0xc +) + +type sysInetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sysIPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type sysIPMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv6/bpf_test.go b/vendor/golang.org/x/net/ipv6/bpf_test.go new file mode 100644 index 00000000000..03d478dc036 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/bpf_test.go @@ -0,0 +1,93 @@ +// Copyright 2016 The Go 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv6" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + + l, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv6.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp6", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/bpfopt_linux.go b/vendor/golang.org/x/net/ipv6/bpfopt_linux.go new file mode 100644 index 00000000000..066ef20380b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/bpfopt_linux.go @@ -0,0 +1,27 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "os" + "unsafe" + + "golang.org/x/net/bpf" +) + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + fd, err := c.sysfd() + if err != nil { + return err + } + prog := sysSockFProg{ + Len: uint16(len(filter)), + Filter: (*sysSockFilter)(unsafe.Pointer(&filter[0])), + } + return os.NewSyscallError("setsockopt", setsockopt(fd, sysSOL_SOCKET, sysSO_ATTACH_FILTER, unsafe.Pointer(&prog), uint32(unsafe.Sizeof(prog)))) +} diff --git a/vendor/golang.org/x/net/ipv6/bpfopt_stub.go b/vendor/golang.org/x/net/ipv6/bpfopt_stub.go new file mode 100644 index 00000000000..2e4de5f0d52 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/bpfopt_stub.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv6 + +import "golang.org/x/net/bpf" + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. 
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/control.go b/vendor/golang.org/x/net/ipv6/control.go new file mode 100644 index 00000000000..b7362aae794 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control.go @@ -0,0 +1,85 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "fmt" + "net" + "sync" +) + +// Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the +// former still support RFC 2292 only. Please be aware that almost +// all protocol implementations prohibit using a combination of RFC +// 2292 and RFC 3542 for some practical reasons. + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +// A ControlFlags represents per packet basis IP-level socket option +// control flags. +type ControlFlags uint + +const ( + FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet + FlagHopLimit // pass the hop limit on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet + FlagPathMTU // pass the path MTU on the received packet path +) + +const flagPacketInfo = FlagDst | FlagInterface + +// A ControlMessage represents per packet basis IP-level socket +// options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn allows to send the options to the + // protocol stack. + // + TrafficClass int // traffic class, must be 1 <= value <= 255 when specifying + HopLimit int // hop limit, must be 1 <= value <= 255 when specifying + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying + NextHop net.IP // next hop address, specifying only + MTU int // path MTU, receiving only +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) +} + +// Ancillary data socket options +const ( + ctlTrafficClass = iota // header field + ctlHopLimit // header field + ctlPacketInfo // inbound or outbound packet path + ctlNextHop // nexthop + ctlPathMTU // path mtu + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go new file mode 100644 index 00000000000..80ec2e2f0bd --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package ipv6 + +import ( + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_2292HOPLIMIT + m.SetLen(syscall.CmsgLen(4)) + if cm != nil { + data := b[syscall.CmsgLen(0):] + nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit)) + } + return b[syscall.CmsgSpace(4):] +} + +func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_2292PKTINFO + m.SetLen(syscall.CmsgLen(sysSizeofInet6Pktinfo)) + if cm != nil { + pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return b[syscall.CmsgSpace(sysSizeofInet6Pktinfo):] +} + +func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_2292NEXTHOP + m.SetLen(syscall.CmsgLen(sysSizeofSockaddrInet6)) + if cm != nil { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return b[syscall.CmsgSpace(sysSizeofSockaddrInet6):] +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go new file mode 100644 index 00000000000..f344d16d039 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -0,0 +1,99 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv6 + +import ( + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_TCLASS + m.SetLen(syscall.CmsgLen(4)) + if cm != nil { + data := b[syscall.CmsgLen(0):] + nativeEndian.PutUint32(data[:4], uint32(cm.TrafficClass)) + } + return b[syscall.CmsgSpace(4):] +} + +func parseTrafficClass(cm *ControlMessage, b []byte) { + cm.TrafficClass = int(nativeEndian.Uint32(b[:4])) +} + +func marshalHopLimit(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_HOPLIMIT + m.SetLen(syscall.CmsgLen(4)) + if cm != nil { + data := b[syscall.CmsgLen(0):] + nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit)) + } + return b[syscall.CmsgSpace(4):] +} + +func parseHopLimit(cm *ControlMessage, b []byte) { + cm.HopLimit = int(nativeEndian.Uint32(b[:4])) +} + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_PKTINFO + m.SetLen(syscall.CmsgLen(sysSizeofInet6Pktinfo)) + if cm != nil { + pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return b[syscall.CmsgSpace(sysSizeofInet6Pktinfo):] +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[0])) + cm.Dst = pi.Addr[:] + cm.IfIndex = int(pi.Ifindex) +} + +func marshalNextHop(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_NEXTHOP + m.SetLen(syscall.CmsgLen(sysSizeofSockaddrInet6)) + if cm != nil { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return b[syscall.CmsgSpace(sysSizeofSockaddrInet6):] +} + +func parseNextHop(cm *ControlMessage, b []byte) { +} + +func marshalPathMTU(b []byte, cm *ControlMessage) []byte { + m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + m.Level = iana.ProtocolIPv6 + m.Type = sysIPV6_PATHMTU + m.SetLen(syscall.CmsgLen(sysSizeofIPv6Mtuinfo)) + return b[syscall.CmsgSpace(sysSizeofIPv6Mtuinfo):] +} + +func parsePathMTU(cm *ControlMessage, b []byte) { + mi := (*sysIPv6Mtuinfo)(unsafe.Pointer(&b[0])) + cm.Dst = mi.Addr.Addr[:] + cm.IfIndex = int(mi.Addr.Scope_id) + cm.MTU = int(mi.Mtu) +} diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go new file mode 100644 index 00000000000..2fecf7e5ce4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 solaris + +package ipv6 + +func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} + +func newControlMessage(opt *rawOpt) (oob []byte) { + return nil +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + return nil, errOpNoSupport +} + +func marshalControlMessage(cm *ControlMessage) (oob []byte) { + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go new file mode 100644 index 00000000000..2af5beb43e1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -0,0 +1,166 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv6 + +import ( + "os" + "syscall" + + "golang.org/x/net/internal/iana" +) + +func setControlMessage(fd int, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if cf&FlagTrafficClass != 0 && sockOpts[ssoReceiveTrafficClass].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceiveTrafficClass], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTrafficClass) + } else { + opt.clear(FlagTrafficClass) + } + } + if cf&FlagHopLimit != 0 && sockOpts[ssoReceiveHopLimit].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceiveHopLimit], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagHopLimit) + } else { + opt.clear(FlagHopLimit) + } + } + if cf&flagPacketInfo != 0 && sockOpts[ssoReceivePacketInfo].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceivePacketInfo], boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & flagPacketInfo) + } else { + opt.clear(cf & flagPacketInfo) + } + } + if cf&FlagPathMTU != 0 && sockOpts[ssoReceivePathMTU].name > 0 { + if err := setInt(fd, &sockOpts[ssoReceivePathMTU], boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagPathMTU) + } else { + opt.clear(FlagPathMTU) + } + } + return nil +} + +func newControlMessage(opt *rawOpt) (oob []byte) { + opt.RLock() + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += syscall.CmsgSpace(ctlOpts[ctlPathMTU].length) + } + if l > 0 { + oob = make([]byte, l) + b := oob + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + b = ctlOpts[ctlTrafficClass].marshal(b, nil) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + b = ctlOpts[ctlHopLimit].marshal(b, nil) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + b = ctlOpts[ctlPacketInfo].marshal(b, nil) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + b = ctlOpts[ctlPathMTU].marshal(b, nil) + } + } + opt.RUnlock() + return +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + if len(b) == 0 { + return nil, nil + } + cmsgs, err := syscall.ParseSocketControlMessage(b) + if err != nil { + return nil, os.NewSyscallError("parse socket control message", err) + } + cm := &ControlMessage{} + for _, m := range cmsgs { + if m.Header.Level != 
iana.ProtocolIPv6 { + continue + } + switch int(m.Header.Type) { + case ctlOpts[ctlTrafficClass].name: + ctlOpts[ctlTrafficClass].parse(cm, m.Data[:]) + case ctlOpts[ctlHopLimit].name: + ctlOpts[ctlHopLimit].parse(cm, m.Data[:]) + case ctlOpts[ctlPacketInfo].name: + ctlOpts[ctlPacketInfo].parse(cm, m.Data[:]) + case ctlOpts[ctlPathMTU].name: + ctlOpts[ctlPathMTU].parse(cm, m.Data[:]) + } + } + return cm, nil +} + +func marshalControlMessage(cm *ControlMessage) (oob []byte) { + if cm == nil { + return + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += syscall.CmsgSpace(ctlOpts[ctlNextHop].length) + } + if l > 0 { + oob = make([]byte, l) + b := oob + if tclass { + b = ctlOpts[ctlTrafficClass].marshal(b, cm) + } + if hoplimit { + b = ctlOpts[ctlHopLimit].marshal(b, cm) + } + if pktinfo { + b = ctlOpts[ctlPacketInfo].marshal(b, cm) + } + if nexthop { + b = ctlOpts[ctlNextHop].marshal(b, cm) + } + } + return +} diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go new file mode 100644 index 00000000000..72fdc1b0382 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_windows.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "syscall" + +func setControlMessage(fd syscall.Handle, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} + +func newControlMessage(opt *rawOpt) (oob []byte) { + // TODO(mikio): implement this + return nil +} + +func parseControlMessage(b []byte) (*ControlMessage, error) { + // TODO(mikio): implement this + return nil, syscall.EWINDOWS +} + +func marshalControlMessage(cm *ControlMessage) (oob []byte) { + // TODO(mikio): implement this + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go new file mode 100644 index 00000000000..4c7f476a8d1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_darwin.go @@ -0,0 +1,112 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#define __APPLE_USE_RFC_3542 +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sysSizeofGroupReq = C.sizeof_struct_group_req + sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrStorage C.struct_sockaddr_storage + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysICMPv6Filter C.struct_icmp6_filter + +type sysGroupReq C.struct_group_req + +type sysGroupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go new file mode 100644 index 00000000000..c72487ceb7a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go @@ -0,0 +1,84 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go new file mode 100644 index 00000000000..de199ec6a7f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_freebsd.go @@ -0,0 +1,105 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_BINDANY = C.IPV6_BINDANY + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sysSizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sysSizeofGroupReq = C.sizeof_struct_group_req + sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrStorage C.struct_sockaddr_storage + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysGroupReq C.struct_group_req + +type sysGroupSourceReq C.struct_group_source_req + +type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go new file mode 100644 index 00000000000..664305d8bd5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_linux.go @@ -0,0 +1,145 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + sysIPV6_ADDRFORM = C.IPV6_ADDRFORM + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_FLOWINFO = C.IPV6_FLOWINFO + + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP + sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT + sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER + sysIPV6_MTU = C.IPV6_MTU + sysIPV6_RECVERR = C.IPV6_RECVERR + sysIPV6_V6ONLY = C.IPV6_V6ONLY + sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST + sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST + + //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT + //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT + //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO + //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE + //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE + //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT + + sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR + sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES + + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + + sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT + + sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR + sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR + sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT + sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF + + sysICMPV6_FILTER = C.ICMPV6_FILTER + + sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK + sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS + sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS + sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY + + sysSOL_SOCKET = 
C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sysSizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sysSizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sysSizeofGroupReq = C.sizeof_struct_group_req + sysSizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysKernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6FlowlabelReq C.struct_in6_flowlabel_req + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysGroupReq C.struct_group_req + +type sysGroupSourceReq C.struct_group_source_req + +type sysICMPv6Filter C.struct_icmp6_filter + +type sysSockFProg C.struct_sock_fprog + +type sysSockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go new file mode 100644 index 00000000000..7bd09e8e883 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_netbsd.go @@ -0,0 +1,80 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_openbsd.go 
b/vendor/golang.org/x/net/ipv6/defs_openbsd.go new file mode 100644 index 00000000000..6796d9b2f52 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_openbsd.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL + sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL + sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL + sysIPSEC6_OUTSA = C.IPSEC6_OUTSA + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_PIPEX = C.IPV6_PIPEX + + sysIPV6_RTABLE = C.IPV6_RTABLE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go new file mode 100644 index 00000000000..972b171260f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_solaris.go @@ -0,0 +1,96 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + + sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_SEC_OPT = C.IPV6_SEC_OPT + sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + + sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK + sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT + sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK + sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT + sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK + sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT + + sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK + + sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC + + sysICMP6_FILTER = C.ICMP6_FILTER + + sysSizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sysSizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sysSizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sysSizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sysSizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sysSockaddrInet6 C.struct_sockaddr_in6 + +type sysInet6Pktinfo C.struct_in6_pktinfo + +type sysIPv6Mtuinfo C.struct_ip6_mtuinfo + +type sysIPv6Mreq C.struct_ipv6_mreq + +type sysICMPv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/dgramopt_posix.go b/vendor/golang.org/x/net/ipv6/dgramopt_posix.go new file mode 100644 index 00000000000..93ff2f1af3a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/dgramopt_posix.go @@ -0,0 +1,288 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// MulticastHopLimit returns the hop limit field value for outgoing +// multicast packets. 
+func (c *dgramOpt) MulticastHopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoMulticastHopLimit]) +} + +// SetMulticastHopLimit sets the hop limit field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoMulticastHopLimit], hoplim) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return nil, err + } + return getInterface(fd, &sockOpts[ssoMulticastInterface]) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInterface(fd, &sockOpts[ssoMulticastInterface], ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return false, err + } + on, err := getInt(fd, &sockOpts[ssoMulticastLoopback]) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoMulticastLoopback], boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return setGroup(fd, &sockOpts[ssoJoinGroup], ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return setGroup(fd, &sockOpts[ssoLeaveGroup], ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. 
+// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoJoinSourceGroup], ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoBlockSourceGroup], ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return setSourceGroup(fd, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src) +} + +// Checksum reports whether the kernel will compute, store or verify a +// checksum for both incoming and outgoing packets. If on is true, it +// returns an offset in bytes into the data of where the checksum +// field is located. +func (c *dgramOpt) Checksum() (on bool, offset int, err error) { + if !c.ok() { + return false, 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return false, 0, err + } + offset, err = getInt(fd, &sockOpts[ssoChecksum]) + if err != nil { + return false, 0, err + } + if offset < 0 { + return false, 0, nil + } + return true, offset, nil +} + +// SetChecksum enables the kernel checksum processing. If on is ture, +// the offset should be an offset in bytes into the data of where the +// checksum field is located. +func (c *dgramOpt) SetChecksum(on bool, offset int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + if !on { + offset = -1 + } + return setInt(fd, &sockOpts[ssoChecksum], offset) +} + +// ICMPFilter returns an ICMP filter. 
+func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return nil, err + } + return getICMPFilter(fd, &sockOpts[ssoICMPFilter]) +} + +// SetICMPFilter deploys the ICMP filter. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setICMPFilter(fd, &sockOpts[ssoICMPFilter], f) +} diff --git a/vendor/golang.org/x/net/ipv6/dgramopt_stub.go b/vendor/golang.org/x/net/ipv6/dgramopt_stub.go new file mode 100644 index 00000000000..fb067fb2ff7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/dgramopt_stub.go @@ -0,0 +1,119 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv6 + +import "net" + +// MulticastHopLimit returns the hop limit field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastHopLimit() (int, error) { + return 0, errOpNoSupport +} + +// SetMulticastHopLimit sets the hop limit field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { + return errOpNoSupport +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + return nil, errOpNoSupport +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + return errOpNoSupport +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + return false, errOpNoSupport +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + return errOpNoSupport +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + return errOpNoSupport +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + return errOpNoSupport +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. 
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + return errOpNoSupport +} + +// Checksum reports whether the kernel will compute, store or verify a +// checksum for both incoming and outgoing packets. If on is true, it +// returns an offset in bytes into the data of where the checksum +// field is located. +func (c *dgramOpt) Checksum() (on bool, offset int, err error) { + return false, 0, errOpNoSupport +} + +// SetChecksum enables the kernel checksum processing. If on is ture, +// the offset should be an offset in bytes into the data of where the +// checksum field is located. +func (c *dgramOpt) SetChecksum(on bool, offset int) error { + return errOpNoSupport +} + +// ICMPFilter returns an ICMP filter. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +// SetICMPFilter deploys the ICMP filter. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go new file mode 100644 index 00000000000..dd13aa21f86 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -0,0 +1,240 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv6 implements IP-level socket options for the Internet +// Protocol version 6. +// +// The package provides IP-level socket options that allow +// manipulation of IPv6 facilities. +// +// The IPv6 protocol is defined in RFC 2460. +// Basic and advanced socket interface extensions are defined in RFC +// 3493 and RFC 3542. +// Socket interface extensions for multicast source filters are +// defined in RFC 3678. +// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810. +// Source-specific multicast is defined in RFC 4607. +// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv6 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, ipv6.Conn is used to set the traffic class field on the +// IPv6 header for each packet. +// +// ln, err := net.Listen("tcp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. 
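+// (AF11 corresponds to DSCP value 10; placed in the upper six bits of the
+// traffic class octet it yields the value 0x28 used below.)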
+// +// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPconn which are created as network connections that use the +// IPv6 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.ParseIP("ff02::114") +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv6 and Ethernet. +// +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of ipv6.PacketConn is used to enable control +// message transmissons. +// +// if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, rcm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if rcm.Dst.IsMulticast() { +// if rcm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTrafficClass(0x0) +// p.SetHopLimit(16) +// if _, err := p.WriteTo(data[:n], nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1} +// for _, ifi := range []*net.Interface{en0, en1} { +// wcm.IfIndex = ifi.Index +// if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn may join multiple multicast +// groups. 
For example, a UDP listener with port 1024 might join two +// different groups across over two different network interfaces by +// using: +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv6.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// p2 := ipv6.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn on MLDv2 supported platform is +// able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")} +// ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on MLDv2 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// MLDv1 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. 
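The package comment above presents the multicast steps as fragments. The sketch below assembles them into one self-contained listener; it is illustrative only, and the interface name "en0", the group ff02::114 and port 1024 are placeholder values carried over from the examples above that must match the local system.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	// Listen on the wildcard address; port 1024 matches the examples above.
	c, err := net.ListenPacket("udp6", "[::]:1024")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// "en0" is a placeholder interface name; use one that exists locally.
	en0, err := net.InterfaceByName("en0")
	if err != nil {
		log.Fatal(err)
	}
	group := net.ParseIP("ff02::114")

	p := ipv6.NewPacketConn(c)
	if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil {
		log.Fatal(err)
	}
	defer p.LeaveGroup(en0, &net.UDPAddr{IP: group})

	// Request the destination address and arrival interface per packet;
	// some platforms reject one or more of these flags.
	if err := p.SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true); err != nil {
		log.Println("control messages not fully supported:", err)
	}

	b := make([]byte, 1500)
	for {
		n, rcm, src, err := p.ReadFrom(b)
		if err != nil {
			log.Fatal(err)
		}
		// Drop packets that were not addressed to the joined group.
		if rcm != nil && rcm.Dst != nil && !rcm.Dst.Equal(group) {
			continue
		}
		// Echo back to the sender on the interface the packet arrived on,
		// keeping the reply link-local with a hop limit of 1.
		wcm := ipv6.ControlMessage{HopLimit: 1}
		if rcm != nil {
			wcm.IfIndex = rcm.IfIndex
		}
		if _, err := p.WriteTo(b[:n], &wcm, src); err != nil {
			log.Fatal(err)
		}
	}
}

Note that in this vendored snapshot the datagram options are stubbed out on nacl, plan9 and solaris (see dgramopt_stub.go above), so JoinGroup and SetControlMessage return errOpNoSupport there.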
+package ipv6 // import "golang.org/x/net/ipv6" diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go new file mode 100644 index 00000000000..966eaa8920d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -0,0 +1,123 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "time" +) + +// A Conn represents a network endpoint that uses IPv6 transport. +// It allows to set basic IP-level socket options such as traffic +// class and hop limit. +type Conn struct { + genericOpt +} + +type genericOpt struct { + net.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// PathMTU returns a path MTU value for the destination associated +// with the endpoint. +func (c *Conn) PathMTU() (int, error) { + if !c.genericOpt.ok() { + return 0, syscall.EINVAL + } + fd, err := c.genericOpt.sysfd() + if err != nil { + return 0, err + } + _, mtu, err := getMTUInfo(fd, &sockOpts[ssoPathMTU]) + if err != nil { + return 0, err + } + return mtu, nil +} + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + return &Conn{ + genericOpt: genericOpt{Conn: c}, + } +} + +// A PacketConn represents a packet network endpoint that uses IPv6 +// transport. It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv6 and higher layer +// protocols such as OSPF, GRE, and UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + net.PacketConn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil } + +// SetControlMessage allows to receive the per packet basis IP-level +// socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + fd, err := c.payloadHandler.sysfd() + if err != nil { + return err + } + return setControlMessage(fd, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. 
+func NewPacketConn(c net.PacketConn) *PacketConn { + return &PacketConn{ + genericOpt: genericOpt{Conn: c.(net.Conn)}, + dgramOpt: dgramOpt{PacketConn: c}, + payloadHandler: payloadHandler{PacketConn: c}, + } +} diff --git a/vendor/golang.org/x/net/ipv6/example_test.go b/vendor/golang.org/x/net/ipv6/example_test.go new file mode 100644 index 00000000000..e761aa2a174 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/example_test.go @@ -0,0 +1,216 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "fmt" + "log" + "net" + "os" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "[::]:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To16() != nil && c.RemoteAddr().(*net.TCPAddr).IP.To4() == nil { + p := ipv6.NewConn(c) + if err := p.SetTrafficClass(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetHopLimit(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp6", "[::]:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.ParseIP("ff02::fb")} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + b := make([]byte, 1500) + for { + _, rcm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !rcm.Dst.IsMulticast() || !rcm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + wcm.IfIndex = rcm.IfIndex + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, &wcm, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. 
+ + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To16() != nil && ip.To4() == nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no AAAA record found") + } + + c, err := net.ListenPacket("ip6:58", "::") // ICMP for IPv6 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagSrc|ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + + // In the real world usually there are several + // multiple traffic-engineered paths for each hop. + // You may need to probe a few times to each hop. + begin := time.Now() + wcm.HopLimit = i + if _, err := p.WriteTo(wb, &wcm, &dst); err != nil { + log.Fatal(err) + } + if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + log.Fatal(err) + } + n, rcm, peer, err := p.ReadFrom(rb) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + fmt.Printf("%v\t*\n", i) + continue + } + log.Fatal(err) + } + rm, err := icmp.ParseMessage(58, rb[:n]) + if err != nil { + log.Fatal(err) + } + rtt := time.Since(begin) + + // In the real world you need to determine whether the + // received message is yours using ControlMessage.Src, + // ControlMesage.Dst, icmp.Echo.ID and icmp.Echo.Seq. + switch rm.Type { + case ipv6.ICMPTypeTimeExceeded: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) + case ipv6.ICMPTypeEchoReply: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) + return + } + } +} + +func ExamplePacketConn_advertisingOSPFHello() { + c, err := net.ListenPacket("ip6:89", "::") // OSPF for IPv6 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + allSPFRouters := net.IPAddr{IP: net.ParseIP("ff02::5")} + if err := p.JoinGroup(en0, &allSPFRouters); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &allSPFRouters) + + hello := make([]byte, 24) // fake hello data, you need to implement this + ospf := make([]byte, 16) // fake ospf header, you need to implement this + ospf[0] = 3 // version 3 + ospf[1] = 1 // hello packet + ospf = append(ospf, hello...) 
+ if err := p.SetChecksum(true, 12); err != nil { + log.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: 0xc0, // DSCP CS6 + HopLimit: 1, + IfIndex: en0.Index, + } + if _, err := p.WriteTo(ospf, &cm, &allSPFRouters); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go new file mode 100644 index 00000000000..826e3ae286c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/gen.go @@ -0,0 +1,208 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + // The ipv6 package still supports go1.2, and so we need to + // take care of additional platforms in go1.3 and above for + // working with go1.2. + switch { + case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris": + b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv6\n"), 1) + case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"): + b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv6\n"), 1) + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "http://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", + parseICMPv6Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "package ipv6\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv6Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) 
+ var icp icmpv6Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigName) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv6Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv6ParamRecord struct { + OrigName string + Name string + Value int +} + +func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Name, "Reserved") || + strings.Contains(pr.Name, "Unassigned") || + strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "Experiment") || + strings.Contains(pr.Name, "experiment") { + continue + } + ss := strings.Split(pr.Name, "\n") + if len(ss) > 1 { + prs[i].Name = strings.Join(ss, " ") + } else { + prs[i].Name = ss[0] + } + s := strings.TrimSpace(prs[i].Name) + prs[i].OrigName = s + prs[i].Name = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv6/genericopt_posix.go b/vendor/golang.org/x/net/ipv6/genericopt_posix.go new file mode 100644 index 00000000000..dd77a0167e2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/genericopt_posix.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd windows + +package ipv6 + +import "syscall" + +// TrafficClass returns the traffic class field value for outgoing +// packets. +func (c *genericOpt) TrafficClass() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoTrafficClass]) +} + +// SetTrafficClass sets the traffic class field value for future +// outgoing packets. +func (c *genericOpt) SetTrafficClass(tclass int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoTrafficClass], tclass) +} + +// HopLimit returns the hop limit field value for outgoing packets. 
+func (c *genericOpt) HopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return 0, err + } + return getInt(fd, &sockOpts[ssoHopLimit]) +} + +// SetHopLimit sets the hop limit field value for future outgoing +// packets. +func (c *genericOpt) SetHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + fd, err := c.sysfd() + if err != nil { + return err + } + return setInt(fd, &sockOpts[ssoHopLimit], hoplim) +} diff --git a/vendor/golang.org/x/net/ipv6/genericopt_stub.go b/vendor/golang.org/x/net/ipv6/genericopt_stub.go new file mode 100644 index 00000000000..f5c3722421d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/genericopt_stub.go @@ -0,0 +1,30 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv6 + +// TrafficClass returns the traffic class field value for outgoing +// packets. +func (c *genericOpt) TrafficClass() (int, error) { + return 0, errOpNoSupport +} + +// SetTrafficClass sets the traffic class field value for future +// outgoing packets. +func (c *genericOpt) SetTrafficClass(tclass int) error { + return errOpNoSupport +} + +// HopLimit returns the hop limit field value for outgoing packets. +func (c *genericOpt) HopLimit() (int, error) { + return 0, errOpNoSupport +} + +// SetHopLimit sets the hop limit field value for future outgoing +// packets. +func (c *genericOpt) SetHopLimit(hoplim int) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/header.go b/vendor/golang.org/x/net/ipv6/header.go new file mode 100644 index 00000000000..e05cb08b21c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + "fmt" + "net" +) + +const ( + Version = 6 // protocol version + HeaderLen = 40 // header length +) + +// A Header represents an IPv6 base header. +type Header struct { + Version int // protocol version + TrafficClass int // traffic class + FlowLabel int // flow label + PayloadLen int // payload length + NextHeader int // next header + HopLimit int // hop limit + Src net.IP // source address + Dst net.IP // destination address +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst) +} + +// ParseHeader parses b as an IPv6 base header. 
+func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + h := &Header{ + Version: int(b[0]) >> 4, + TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, + FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), + NextHeader: int(b[6]), + HopLimit: int(b[7]), + } + h.Src = make(net.IP, net.IPv6len) + copy(h.Src, b[8:24]) + h.Dst = make(net.IP, net.IPv6len) + copy(h.Dst, b[24:40]) + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv6/header_test.go b/vendor/golang.org/x/net/ipv6/header_test.go new file mode 100644 index 00000000000..ca11dc23deb --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header_test.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "reflect" + "strings" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv6" +) + +var ( + wireHeaderFromKernel = [ipv6.HeaderLen]byte{ + 0x69, 0x8b, 0xee, 0xf1, + 0xca, 0xfe, 0x2c, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + } + + testHeader = &ipv6.Header{ + Version: ipv6.Version, + TrafficClass: iana.DiffServAF43, + FlowLabel: 0xbeef1, + PayloadLen: 0xcafe, + NextHeader: iana.ProtocolIPv6Frag, + HopLimit: 1, + Src: net.ParseIP("2001:db8:1::1"), + Dst: net.ParseIP("2001:db8:2::1"), + } +) + +func TestParseHeader(t *testing.T) { + h, err := ipv6.ParseHeader(wireHeaderFromKernel[:]) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, testHeader) { + t.Fatalf("got %#v; want %#v", h, testHeader) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } +} diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go new file mode 100644 index 00000000000..53b99990587 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -0,0 +1,53 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + "errors" + "net" + "unsafe" +) + +var ( + errMissingAddress = errors.New("missing address") + errHeaderTooShort = errors.New("header too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") + + nativeEndian binary.ByteOrder +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = binary.LittleEndian + } else { + nativeEndian = binary.BigEndian + } +} + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP16(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/helper_stub.go b/vendor/golang.org/x/net/ipv6/helper_stub.go new file mode 100644 index 00000000000..20354ab2f3c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper_stub.go @@ -0,0 +1,19 @@ +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv6 + +func (c *genericOpt) sysfd() (int, error) { + return 0, errOpNoSupport +} + +func (c *dgramOpt) sysfd() (int, error) { + return 0, errOpNoSupport +} + +func (c *payloadHandler) sysfd() (int, error) { + return 0, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/helper_unix.go b/vendor/golang.org/x/net/ipv6/helper_unix.go new file mode 100644 index 00000000000..92868ed2955 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper_unix.go @@ -0,0 +1,46 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv6 + +import ( + "net" + "reflect" +) + +func (c *genericOpt) sysfd() (int, error) { + switch p := c.Conn.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + return sysfd(p) + } + return 0, errInvalidConnType +} + +func (c *dgramOpt) sysfd() (int, error) { + switch p := c.PacketConn.(type) { + case *net.UDPConn, *net.IPConn: + return sysfd(p.(net.Conn)) + } + return 0, errInvalidConnType +} + +func (c *payloadHandler) sysfd() (int, error) { + return sysfd(c.PacketConn.(net.Conn)) +} + +func sysfd(c net.Conn) (int, error) { + cv := reflect.ValueOf(c) + switch ce := cv.Elem(); ce.Kind() { + case reflect.Struct: + nfd := ce.FieldByName("conn").FieldByName("fd") + switch fe := nfd.Elem(); fe.Kind() { + case reflect.Struct: + fd := fe.FieldByName("sysfd") + return int(fd.Int()), nil + } + } + return 0, errInvalidConnType +} diff --git a/vendor/golang.org/x/net/ipv6/helper_windows.go b/vendor/golang.org/x/net/ipv6/helper_windows.go new file mode 100644 index 00000000000..28c401b53cb --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper_windows.go @@ -0,0 +1,45 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "reflect" + "syscall" +) + +func (c *genericOpt) sysfd() (syscall.Handle, error) { + switch p := c.Conn.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + return sysfd(p) + } + return syscall.InvalidHandle, errInvalidConnType +} + +func (c *dgramOpt) sysfd() (syscall.Handle, error) { + switch p := c.PacketConn.(type) { + case *net.UDPConn, *net.IPConn: + return sysfd(p.(net.Conn)) + } + return syscall.InvalidHandle, errInvalidConnType +} + +func (c *payloadHandler) sysfd() (syscall.Handle, error) { + return sysfd(c.PacketConn.(net.Conn)) +} + +func sysfd(c net.Conn) (syscall.Handle, error) { + cv := reflect.ValueOf(c) + switch ce := cv.Elem(); ce.Kind() { + case reflect.Struct: + netfd := ce.FieldByName("conn").FieldByName("fd") + switch fe := netfd.Elem(); fe.Kind() { + case reflect.Struct: + fd := fe.FieldByName("sysfd") + return syscall.Handle(fd.Uint()), nil + } + } + return syscall.InvalidHandle, errInvalidConnType +} diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go new file mode 100644 index 00000000000..3c6214fb696 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -0,0 +1,82 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package ipv6 + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +const ( + ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable + ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big + ICMPTypeTimeExceeded ICMPType = 3 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 4 // Parameter Problem + ICMPTypeEchoRequest ICMPType = 128 // Echo Request + ICMPTypeEchoReply ICMPType = 129 // Echo Reply + ICMPTypeMulticastListenerQuery ICMPType = 130 // Multicast Listener Query + ICMPTypeMulticastListenerReport ICMPType = 131 // Multicast Listener Report + ICMPTypeMulticastListenerDone ICMPType = 132 // Multicast Listener Done + ICMPTypeRouterSolicitation ICMPType = 133 // Router Solicitation + ICMPTypeRouterAdvertisement ICMPType = 134 // Router Advertisement + ICMPTypeNeighborSolicitation ICMPType = 135 // Neighbor Solicitation + ICMPTypeNeighborAdvertisement ICMPType = 136 // Neighbor Advertisement + ICMPTypeRedirect ICMPType = 137 // Redirect Message + ICMPTypeRouterRenumbering ICMPType = 138 // Router Renumbering + ICMPTypeNodeInformationQuery ICMPType = 139 // ICMP Node Information Query + ICMPTypeNodeInformationResponse ICMPType = 140 // ICMP Node Information Response + ICMPTypeInverseNeighborDiscoverySolicitation ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message + ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message + ICMPTypeVersion2MulticastListenerReport ICMPType = 143 // Version 2 Multicast Listener Report + ICMPTypeHomeAgentAddressDiscoveryRequest ICMPType = 144 // Home Agent Address Discovery Request Message + ICMPTypeHomeAgentAddressDiscoveryReply ICMPType = 145 // Home Agent Address Discovery Reply Message + ICMPTypeMobilePrefixSolicitation ICMPType = 146 // Mobile Prefix Solicitation + ICMPTypeMobilePrefixAdvertisement ICMPType = 147 // Mobile Prefix Advertisement + ICMPTypeCertificationPathSolicitation ICMPType = 148 // Certification Path Solicitation Message + ICMPTypeCertificationPathAdvertisement ICMPType = 149 // Certification Path Advertisement Message + ICMPTypeMulticastRouterAdvertisement ICMPType = 151 // Multicast Router Advertisement + ICMPTypeMulticastRouterSolicitation ICMPType = 152 // 
Multicast Router Solicitation + ICMPTypeMulticastRouterTermination ICMPType = 153 // Multicast Router Termination + ICMPTypeFMIPv6 ICMPType = 154 // FMIPv6 Messages + ICMPTypeRPLControl ICMPType = 155 // RPL Control Message + ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message + ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request + ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation + ICMPTypeMPLControl ICMPType = 159 // MPL Control Message +) + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +var icmpTypes = map[ICMPType]string{ + 1: "destination unreachable", + 2: "packet too big", + 3: "time exceeded", + 4: "parameter problem", + 128: "echo request", + 129: "echo reply", + 130: "multicast listener query", + 131: "multicast listener report", + 132: "multicast listener done", + 133: "router solicitation", + 134: "router advertisement", + 135: "neighbor solicitation", + 136: "neighbor advertisement", + 137: "redirect message", + 138: "router renumbering", + 139: "icmp node information query", + 140: "icmp node information response", + 141: "inverse neighbor discovery solicitation message", + 142: "inverse neighbor discovery advertisement message", + 143: "version 2 multicast listener report", + 144: "home agent address discovery request message", + 145: "home agent address discovery reply message", + 146: "mobile prefix solicitation", + 147: "mobile prefix advertisement", + 148: "certification path solicitation message", + 149: "certification path advertisement message", + 151: "multicast router advertisement", + 152: "multicast router solicitation", + 153: "multicast router termination", + 154: "fmipv6 messages", + 155: "rpl control message", + 156: "ilnpv6 locator update message", + 157: "duplicate address request", + 158: "duplicate address confirmation", + 159: "mpl control message", +} diff --git a/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/golang.org/x/net/ipv6/icmp.go new file mode 100644 index 00000000000..a2de65a08c1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/iana" + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv6 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolIPv6ICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 2460 defines a reasonable role model. A node means a +// device that implements IP. A router means a node that forwards IP +// packets not explicitly addressed to itself, and a host means a node +// that is not a router. +type ICMPFilter struct { + sysICMPv6Filter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. 
+func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go new file mode 100644 index 00000000000..30e3ce424d0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv6 + +func (f *sysICMPv6Filter) accept(typ ICMPType) { + f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPv6Filter) block(typ ICMPType) { + f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPv6Filter) setAll(block bool) { + for i := range f.Filt { + if block { + f.Filt[i] = 0 + } else { + f.Filt[i] = 1<<32 - 1 + } + } +} + +func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { + return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/golang.org/x/net/ipv6/icmp_linux.go new file mode 100644 index 00000000000..a67ecf690cd --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *sysICMPv6Filter) accept(typ ICMPType) { + f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPv6Filter) block(typ ICMPType) { + f.Data[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *sysICMPv6Filter) setAll(block bool) { + for i := range f.Data { + if block { + f.Data[i] = 1<<32 - 1 + } else { + f.Data[i] = 0 + } + } +} + +func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { + return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/golang.org/x/net/ipv6/icmp_solaris.go new file mode 100644 index 00000000000..a942f354ca5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package ipv6 + +func (f *sysICMPv6Filter) accept(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) block(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) setAll(block bool) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { + // TODO(mikio): implement this + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go new file mode 100644 index 00000000000..c1263ecac49 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 + +package ipv6 + +type sysICMPv6Filter struct { +} + +func (f *sysICMPv6Filter) accept(typ ICMPType) { +} + +func (f *sysICMPv6Filter) block(typ ICMPType) { +} + +func (f *sysICMPv6Filter) setAll(block bool) { +} + +func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_test.go b/vendor/golang.org/x/net/ipv6/icmp_test.go new file mode 100644 index 00000000000..e192d6d8c22 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_test.go @@ -0,0 +1,96 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var icmpStringTests = []struct { + in ipv6.ICMPType + out string +}{ + {ipv6.ICMPTypeDestinationUnreachable, "destination unreachable"}, + + {256, ""}, +} + +func TestICMPString(t *testing.T) { + for _, tt := range icmpStringTests { + s := tt.in.String() + if s != tt.out { + t.Errorf("got %s; want %s", s, tt.out) + } + } +} + +func TestICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + var f ipv6.ICMPFilter + for _, toggle := range []bool{false, true} { + f.SetAll(toggle) + for _, typ := range []ipv6.ICMPType{ + ipv6.ICMPTypeDestinationUnreachable, + ipv6.ICMPTypeEchoReply, + ipv6.ICMPTypeNeighborSolicitation, + ipv6.ICMPTypeDuplicateAddressConfirmation, + } { + f.Accept(typ) + if f.WillBlock(typ) { + t.Errorf("ipv6.ICMPFilter.Set(%v, false) failed", typ) + } + f.Block(typ) + if !f.WillBlock(typ) { + t.Errorf("ipv6.ICMPFilter.Set(%v, true) failed", typ) + } + } + } +} + +func TestSetICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoRequest) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + kf, err := p.ICMPFilter() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(kf, &f) { + t.Fatalf("got %#v; want %#v", kf, f) + } +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/golang.org/x/net/ipv6/icmp_windows.go new file mode 100644 index 00000000000..9dcfb8106b5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_windows.go @@ -0,0 +1,26 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +type sysICMPv6Filter struct { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) accept(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) block(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) setAll(block bool) { + // TODO(mikio): implement this +} + +func (f *sysICMPv6Filter) willBlock(typ ICMPType) bool { + // TODO(mikio): implement this + return false +} diff --git a/vendor/golang.org/x/net/ipv6/mocktransponder_test.go b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go new file mode 100644 index 00000000000..d587922a14c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go @@ -0,0 +1,32 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "testing" +) + +func connector(t *testing.T, network, addr string, done chan<- bool) { + defer func() { done <- true }() + + c, err := net.Dial(network, addr) + if err != nil { + t.Error(err) + return + } + c.Close() +} + +func acceptor(t *testing.T, ln net.Listener, done chan<- bool) { + defer func() { done <- true }() + + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + c.Close() +} diff --git a/vendor/golang.org/x/net/ipv6/multicast_test.go b/vendor/golang.org/x/net/ipv6/multicast_test.go new file mode 100644 index 00000000000..a3a8979d29e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicast_test.go @@ -0,0 +1,260 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + + {"[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "freebsd": // due to a bug on loopback marking + // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. 
+ t.Skipf("not supported on %s", runtime.GOOS) + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp6", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv6.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + IfIndex: ifi.Index, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + cm.HopLimit = i + 1 + if n, err := p.WriteTo(wb, &cm, &grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatal(err) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } + } +} + +var packetConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastICMP(t *testing.T) { + switch runtime.GOOS { + case "freebsd": // due to a bug on loopback marking + // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. 
+ t.Skipf("not supported on %s", runtime.GOOS) + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip6:ipv6-icmp", "::") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, tt.grp.IP) + p := ipv6.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, tt.grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + IfIndex: ifi.Index, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + + var psh []byte + for i, toggle := range []bool{true, false, true} { + if toggle { + psh = nil + if err := p.SetChecksum(true, 2); err != nil { + t.Fatal(err) + } + } else { + psh = pshicmp + // Some platforms never allow to + // disable the kernel checksum + // processing. 
+ p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + cm.HopLimit = i + 1 + if n, err := p.WriteTo(wb, &cm, tt.grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastlistener_test.go b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go new file mode 100644 index 00000000000..9711f7513f8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go @@ -0,0 +1,246 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var udpMultipleGroupListenerTests = []net.Addr{ + &net.UDPAddr{IP: net.ParseIP("ff02::114")}, // see RFC 4727 + &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}, + &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}, +} + +func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c, err := net.ListenPacket("udp6", "[::]:0") // wildcard address with non-reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } +} + +func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c1, err := net.ListenPacket("udp6", "[ff02::]:1024") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c1.Close() + + c2, err := net.ListenPacket("udp6", "[ff02::]:1024") // wildcard address with reusable port + if err != nil { + t.Fatal(err) 
+ } + defer c2.Close() + + var ps [2]*ipv6.PacketConn + ps[0] = ipv6.NewPacketConn(c1) + ps[1] = ipv6.NewPacketConn(c2) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + for _, p := range ps { + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + for _, p := range ps { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } + } +} + +func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + type ml struct { + c *ipv6.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip6", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("udp6", fmt.Sprintf("[%s%%%s]:1024", ip.String(), ifi.Name)) // unicast address with non-reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::") // wildcard address + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "darwin", "dragonfly", "openbsd": // platforms that return fe80::1%lo0: bind: can't assign requested address + t.Skipf("not supported on %s", runtime.GOOS) + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + type ml struct { + c *ipv6.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip6", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip6:ipv6-icmp", fmt.Sprintf("%s%%%s", ip.String(), ifi.Name)) // unicast address + if err != nil { + t.Fatal(err) + } + 
defer c.Close() + p := ipv6.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go new file mode 100644 index 00000000000..fe0e6e1b146 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go @@ -0,0 +1,157 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp6", "", "[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff02::115")}, nil}, // see RFC 4727 + + {"udp6", "", "[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff30::8000:2")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +type testIPv6MulticastConn interface { + MulticastHopLimit() (int, error) + SetMulticastHopLimit(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp net.Addr) { + const hoplim = 255 + if err := c.SetMulticastHopLimit(hoplim); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastHopLimit(); err != nil { + t.Error(err) + return + } else if v != hoplim { + t.Errorf("got %v; want %v", v, hoplim) + return + } + + for _, toggle := range []bool{true, false} { + if err := c.SetMulticastLoopback(toggle); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) 
+ return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv6/payload.go b/vendor/golang.org/x/net/ipv6/payload.go new file mode 100644 index 00000000000..529b20bcaf5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload.go @@ -0,0 +1,15 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "net" + +// A payloadHandler represents the IPv6 datagram payload handler. +type payloadHandler struct { + net.PacketConn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go new file mode 100644 index 00000000000..8e90d324d2d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -0,0 +1,70 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + oob := newControlMessage(&c.rawOpt) + var oobn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil { + return 0, nil, nil, err + } + default: + return 0, nil, nil, errInvalidConnType + } + if cm, err = parseControlMessage(oob[:oobn]); err != nil { + return 0, nil, nil, err + } + if cm != nil { + cm.Src = netAddrToIP16(src) + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. 
The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + oob := marshalControlMessage(cm) + if dst == nil { + return 0, errMissingAddress + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, errInvalidConnType + } + if err != nil { + return 0, err + } + return +} diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go new file mode 100644 index 00000000000..499204d0c78 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -0,0 +1,41 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_test.go b/vendor/golang.org/x/net/ipv6/readwrite_test.go new file mode 100644 index 00000000000..8c8c6fde08e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_test.go @@ -0,0 +1,189 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func benchmarkUDPListener() (net.PacketConn, net.Addr, error) { + c, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + return nil, nil, err + } + dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String()) + if err != nil { + c.Close() + return nil, nil, err + } + return c, dst, nil +} + +func BenchmarkReadWriteNetUDP(b *testing.B) { + if !supportsIPv6 { + b.Skip("ipv6 is not supported") + } + + c, dst, err := benchmarkUDPListener() + if err != nil { + b.Fatal(err) + } + defer c.Close() + + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchmarkReadWriteNetUDP(b, c, wb, rb, dst) + } +} + +func benchmarkReadWriteNetUDP(b *testing.B, c net.PacketConn, wb, rb []byte, dst net.Addr) { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } +} + +func BenchmarkReadWriteIPv6UDP(b *testing.B) { + if !supportsIPv6 { + b.Skip("ipv6 is not supported") + } + + c, dst, err := benchmarkUDPListener() + if err != nil { + b.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchmarkReadWriteIPv6UDP(b, p, wb, rb, dst, ifi) + } +} + +func benchmarkReadWriteIPv6UDP(b *testing.B, p *ipv6.PacketConn, wb, rb []byte, dst net.Addr, ifi *net.Interface) { + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } else if n != len(wb) { + b.Fatalf("got %v; want %v", n, len(wb)) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + c, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + 
} + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("got %v; want %v", n, len(wb)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/golang.org/x/net/ipv6/sockopt.go new file mode 100644 index 00000000000..f0cfc2f94a1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt.go @@ -0,0 +1,46 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +// Sticky socket options +const ( + ssoTrafficClass = iota // header field for unicast packet, RFC 3542 + ssoHopLimit // header field for unicast packet, RFC 3493 + ssoMulticastInterface // outbound interface for multicast packet, RFC 3493 + ssoMulticastHopLimit // header field for multicast packet, RFC 3493 + ssoMulticastLoopback // loopback for multicast packet, RFC 3493 + ssoReceiveTrafficClass // header field on received packet, RFC 3542 + ssoReceiveHopLimit // header field on received packet, RFC 2292 or 3542 + ssoReceivePacketInfo // incbound or outbound packet path, RFC 2292 or 3542 + ssoReceivePathMTU // path mtu, RFC 3542 + ssoPathMTU // path mtu, RFC 3542 + ssoChecksum // packet checksum, RFC 2292 or 3542 + ssoICMPFilter // icmp filter, RFC 2292 or 3542 + ssoJoinGroup // any-source multicast, RFC 3493 + ssoLeaveGroup // any-source multicast, RFC 3493 + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoMax +) + +// Sticky socket option value types +const ( + ssoTypeInt = iota + 1 + ssoTypeInterface + ssoTypeICMPFilter + ssoTypeMTUInfo + ssoTypeIPMreq + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + level int // option level + name int // option name, must be equal or greater than 1 + typ int // option value type, must be equal or greater than 1 +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go b/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go new file mode 100644 index 00000000000..b7fd4fe670e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_asmreq_unix.go @@ -0,0 +1,22 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv6 + +import ( + "net" + "os" + "unsafe" +) + +func setsockoptIPMreq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + var mreq sysIPv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&mreq), sysSizeofIPv6Mreq)) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go b/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go new file mode 100644 index 00000000000..c03c731342f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_asmreq_windows.go @@ -0,0 +1,21 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "os" + "syscall" + "unsafe" +) + +func setsockoptIPMreq(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + var mreq sysIPv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&mreq)), sysSizeofIPv6Mreq)) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go new file mode 100644 index 00000000000..7732e49f881 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux + +package ipv6 + +import "net" + +func setsockoptGroupReq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func setsockoptGroupSourceReq(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go new file mode 100644 index 00000000000..a36a7e03d82 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go @@ -0,0 +1,59 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin freebsd linux + +package ipv6 + +import ( + "net" + "os" + "unsafe" +) + +var freebsd32o64 bool + +func setsockoptGroupReq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + var gr sysGroupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var p unsafe.Pointer + var l uint32 + if freebsd32o64 { + var d [sysSizeofGroupReq + 4]byte + s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = sysSizeofGroupReq + 4 + } else { + p = unsafe.Pointer(&gr) + l = sysSizeofGroupReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, p, l)) +} + +func setsockoptGroupSourceReq(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + var gsr sysGroupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var p unsafe.Pointer + var l uint32 + if freebsd32o64 { + var d [sysSizeofGroupSourceReq + 4]byte + s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = sysSizeofGroupSourceReq + 4 + } else { + p = unsafe.Pointer(&gsr) + l = sysSizeofGroupSourceReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, p, l)) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go new file mode 100644 index 00000000000..b8dacfde9e4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package ipv6 + +import "net" + +func getMTUInfo(fd int, opt *sockOpt) (*net.Interface, int, error) { + return nil, 0, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_test.go b/vendor/golang.org/x/net/ipv6/sockopt_test.go new file mode 100644 index 00000000000..9c21903160b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_test.go @@ -0,0 +1,133 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var supportsIPv6 bool = nettest.SupportsIPv6() + +func TestConnInitiatorPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go acceptor(t, ln, done) + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestConnResponderPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go connector(t, "tcp6", ln.Addr().String(), done) + + c, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestPacketConnChecksum(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolOSPFIGP), "::") // OSPF for IPv6 + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + offset := 12 // see RFC 5340 + + for _, toggle := range []bool{false, true} { + if err := p.SetChecksum(toggle, offset); err != nil { + if toggle { + t.Fatalf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) + } else { + // Some platforms never allow to disable the kernel + // checksum processing. + t.Logf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) + } + } + if on, offset, err := p.Checksum(); err != nil { + t.Fatal(err) + } else { + t.Logf("kernel checksum processing enabled=%v, offset=%v", on, offset) + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_unix.go b/vendor/golang.org/x/net/ipv6/sockopt_unix.go new file mode 100644 index 00000000000..7115b18ee56 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_unix.go @@ -0,0 +1,122 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package ipv6 + +import ( + "net" + "os" + "unsafe" +) + +func getInt(fd int, opt *sockOpt) (int, error) { + if opt.name < 1 || opt.typ != ssoTypeInt { + return 0, errOpNoSupport + } + var i int32 + l := uint32(4) + if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil { + return 0, os.NewSyscallError("getsockopt", err) + } + return int(i), nil +} + +func setInt(fd int, opt *sockOpt, v int) error { + if opt.name < 1 || opt.typ != ssoTypeInt { + return errOpNoSupport + } + i := int32(v) + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), 4)) +} + +func getInterface(fd int, opt *sockOpt) (*net.Interface, error) { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return nil, errOpNoSupport + } + var i int32 + l := uint32(4) + if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + if i == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(i)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func setInterface(fd int, opt *sockOpt, ifi *net.Interface) error { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return errOpNoSupport + } + var i int32 + if ifi != nil { + i = int32(ifi.Index) + } + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), 4)) +} + +func getICMPFilter(fd int, opt *sockOpt) (*ICMPFilter, error) { + if opt.name < 1 || opt.typ != ssoTypeICMPFilter { + return nil, errOpNoSupport + } + var f ICMPFilter + l := uint32(sysSizeofICMPv6Filter) + if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&f.sysICMPv6Filter), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + return &f, nil +} + +func setICMPFilter(fd int, opt *sockOpt, f *ICMPFilter) error { + if opt.name < 1 || opt.typ != ssoTypeICMPFilter { + return errOpNoSupport + } + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&f.sysICMPv6Filter), sysSizeofICMPv6Filter)) +} + +func getMTUInfo(fd int, opt *sockOpt) (*net.Interface, int, error) { + if opt.name < 1 || opt.typ != ssoTypeMTUInfo { + return nil, 0, errOpNoSupport + } + var mi sysIPv6Mtuinfo + l := uint32(sysSizeofIPv6Mtuinfo) + if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&mi), &l); err != nil { + return nil, 0, os.NewSyscallError("getsockopt", err) + } + if mi.Addr.Scope_id == 0 { + return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func setGroup(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + if opt.name < 1 { + return errOpNoSupport + } + switch opt.typ { + case ssoTypeIPMreq: + return setsockoptIPMreq(fd, opt, ifi, grp) + case ssoTypeGroupReq: + return setsockoptGroupReq(fd, opt, ifi, grp) + default: + return errOpNoSupport + } +} + +func setSourceGroup(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq { + return errOpNoSupport + } + return setsockoptGroupSourceReq(fd, opt, ifi, grp, src) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_windows.go b/vendor/golang.org/x/net/ipv6/sockopt_windows.go new file mode 100644 index 00000000000..32c73b722c3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_windows.go @@ -0,0 +1,86 @@ +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "os" + "syscall" + "unsafe" +) + +func getInt(fd syscall.Handle, opt *sockOpt) (int, error) { + if opt.name < 1 || opt.typ != ssoTypeInt { + return 0, errOpNoSupport + } + var i int32 + l := int32(4) + if err := syscall.Getsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { + return 0, os.NewSyscallError("getsockopt", err) + } + return int(i), nil +} + +func setInt(fd syscall.Handle, opt *sockOpt, v int) error { + if opt.name < 1 || opt.typ != ssoTypeInt { + return errOpNoSupport + } + i := int32(v) + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) +} + +func getInterface(fd syscall.Handle, opt *sockOpt) (*net.Interface, error) { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return nil, errOpNoSupport + } + var i int32 + l := int32(4) + if err := syscall.Getsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), &l); err != nil { + return nil, os.NewSyscallError("getsockopt", err) + } + if i == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(i)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func setInterface(fd syscall.Handle, opt *sockOpt, ifi *net.Interface) error { + if opt.name < 1 || opt.typ != ssoTypeInterface { + return errOpNoSupport + } + var i int32 + if ifi != nil { + i = int32(ifi.Index) + } + return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd, int32(opt.level), int32(opt.name), (*byte)(unsafe.Pointer(&i)), 4)) +} + +func getICMPFilter(fd syscall.Handle, opt *sockOpt) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func setICMPFilter(fd syscall.Handle, opt *sockOpt, f *ICMPFilter) error { + return errOpNoSupport +} + +func getMTUInfo(fd syscall.Handle, opt *sockOpt) (*net.Interface, int, error) { + return nil, 0, errOpNoSupport +} + +func setGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp net.IP) error { + if opt.name < 1 || opt.typ != ssoTypeIPMreq { + return errOpNoSupport + } + return setsockoptIPMreq(fd, opt, ifi, grp) +} + +func setSourceGroup(fd syscall.Handle, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { + // TODO(mikio): implement this + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go new file mode 100644 index 00000000000..0ee43e6d206 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -0,0 +1,56 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build dragonfly netbsd openbsd + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sysSizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, + ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, + ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, + ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, + ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, + ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, + ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, + ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, + ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, + ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, + ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, + ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, + } +) + +func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *sysInet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *sysIPv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/golang.org/x/net/ipv6/sys_darwin.go new file mode 100644 index 00000000000..c263f08d744 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -0,0 +1,133 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlHopLimit: {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_2292PKTINFO, sysSizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, + ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, + ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, ssoTypeInt}, + ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_2292PKTINFO, ssoTypeInt}, + ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, + ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, + ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, + ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + osver, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + var i int + for i = range osver { + if osver[i] == '.' { + break + } + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11.0.0). But it looks like + // those features require OS X 10.8 (Darwin 12.0.0) and above. + // See http://support.apple.com/kb/HT1633. + if i > 2 || i == 2 && osver[0] >= '1' && osver[1] >= '2' { + ctlOpts[ctlTrafficClass].name = sysIPV6_TCLASS + ctlOpts[ctlTrafficClass].length = 4 + ctlOpts[ctlTrafficClass].marshal = marshalTrafficClass + ctlOpts[ctlTrafficClass].parse = parseTrafficClass + ctlOpts[ctlHopLimit].name = sysIPV6_HOPLIMIT + ctlOpts[ctlHopLimit].marshal = marshalHopLimit + ctlOpts[ctlPacketInfo].name = sysIPV6_PKTINFO + ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo + ctlOpts[ctlNextHop].name = sysIPV6_NEXTHOP + ctlOpts[ctlNextHop].length = sysSizeofSockaddrInet6 + ctlOpts[ctlNextHop].marshal = marshalNextHop + ctlOpts[ctlNextHop].parse = parseNextHop + ctlOpts[ctlPathMTU].name = sysIPV6_PATHMTU + ctlOpts[ctlPathMTU].length = sysSizeofIPv6Mtuinfo + ctlOpts[ctlPathMTU].marshal = marshalPathMTU + ctlOpts[ctlPathMTU].parse = parsePathMTU + sockOpts[ssoTrafficClass].level = iana.ProtocolIPv6 + sockOpts[ssoTrafficClass].name = sysIPV6_TCLASS + sockOpts[ssoTrafficClass].typ = ssoTypeInt + sockOpts[ssoReceiveTrafficClass].level = iana.ProtocolIPv6 + sockOpts[ssoReceiveTrafficClass].name = sysIPV6_RECVTCLASS + sockOpts[ssoReceiveTrafficClass].typ = ssoTypeInt + sockOpts[ssoReceiveHopLimit].name = sysIPV6_RECVHOPLIMIT + sockOpts[ssoReceivePacketInfo].name = sysIPV6_RECVPKTINFO + sockOpts[ssoReceivePathMTU].level = iana.ProtocolIPv6 + sockOpts[ssoReceivePathMTU].name = sysIPV6_RECVPATHMTU + sockOpts[ssoReceivePathMTU].typ = ssoTypeInt + sockOpts[ssoPathMTU].level = iana.ProtocolIPv6 + sockOpts[ssoPathMTU].name = sysIPV6_PATHMTU + sockOpts[ssoPathMTU].typ = ssoTypeMTUInfo + sockOpts[ssoJoinGroup].name = sysMCAST_JOIN_GROUP + sockOpts[ssoJoinGroup].typ = ssoTypeGroupReq + sockOpts[ssoLeaveGroup].name = sysMCAST_LEAVE_GROUP + sockOpts[ssoLeaveGroup].typ = ssoTypeGroupReq + sockOpts[ssoJoinSourceGroup].level = iana.ProtocolIPv6 + sockOpts[ssoJoinSourceGroup].name = sysMCAST_JOIN_SOURCE_GROUP + 
sockOpts[ssoJoinSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoLeaveSourceGroup].level = iana.ProtocolIPv6 + sockOpts[ssoLeaveSourceGroup].name = sysMCAST_LEAVE_SOURCE_GROUP + sockOpts[ssoLeaveSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoBlockSourceGroup].level = iana.ProtocolIPv6 + sockOpts[ssoBlockSourceGroup].name = sysMCAST_BLOCK_SOURCE + sockOpts[ssoBlockSourceGroup].typ = ssoTypeGroupSourceReq + sockOpts[ssoUnblockSourceGroup].level = iana.ProtocolIPv6 + sockOpts[ssoUnblockSourceGroup].name = sysMCAST_UNBLOCK_SOURCE + sockOpts[ssoUnblockSourceGroup].typ = ssoTypeGroupSourceReq + } +} + +func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *sysInet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *sysIPv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Pad_cgo_0[0])) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Pad_cgo_0[0])) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Pad_cgo_1[0])) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go new file mode 100644 index 00000000000..5527001f122 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sysSizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, + ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, + ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, + ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, + ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, + ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, + ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, + ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, + ssoChecksum: {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt}, + ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter}, + ssoJoinGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, + ssoLeaveGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, + ssoJoinSourceGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + } +) + +func init() { + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *sysInet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *sysIPv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sysSizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/golang.org/x/net/ipv6/sys_linux.go new file mode 100644 index 00000000000..fd7d5b188ae --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -0,0 +1,72 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sysSizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sysSizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = [ssoMax]sockOpt{ + ssoTrafficClass: {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}, + ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, + ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, + ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, + ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}, + ssoReceiveHopLimit: {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}, + ssoReceivePacketInfo: {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}, + ssoReceivePathMTU: {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}, + ssoPathMTU: {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}, + ssoChecksum: {iana.ProtocolReserved, sysIPV6_CHECKSUM, ssoTypeInt}, + ssoICMPFilter: {iana.ProtocolIPv6ICMP, sysICMPV6_FILTER, ssoTypeICMPFilter}, + ssoJoinGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq}, + ssoLeaveGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}, + ssoJoinSourceGroup: {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}, + } +) + +func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *sysInet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *sysIPv6Mreq) setIfindex(i int) { + mreq.Ifindex = int32(i) +} + +func (gr *sysGroupReq) setGroup(grp net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *sysGroupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sysSockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go new file mode 100644 index 00000000000..ead0f4d11bc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 solaris + +package ipv6 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = [ssoMax]sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/golang.org/x/net/ipv6/sys_windows.go new file mode 100644 index 00000000000..fda875736fe --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -0,0 +1,63 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" +) + +const ( + // See ws2tcpip.h. + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PKTINFO = 0x13 + + sysSizeofSockaddrInet6 = 0x1c + + sysSizeofIPv6Mreq = 0x14 +) + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = [ssoMax]sockOpt{ + ssoHopLimit: {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt}, + ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface}, + ssoMulticastHopLimit: {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt}, + ssoMulticastLoopback: {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt}, + ssoJoinGroup: {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq}, + ssoLeaveGroup: {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq}, + } +) + +func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (mreq *sysIPv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/syscall_linux_386.go b/vendor/golang.org/x/net/ipv6/syscall_linux_386.go new file mode 100644 index 00000000000..64a3c6653a3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/syscall_linux_386.go @@ -0,0 +1,31 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "syscall" + "unsafe" +) + +const ( + sysGETSOCKOPT = 0xf + sysSETSOCKOPT = 0xe +) + +func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) + +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { + if _, errno := socketcall(sysGETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { + return error(errno) + } + return nil +} + +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { + if _, errno := socketcall(sysSETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/syscall_unix.go b/vendor/golang.org/x/net/ipv6/syscall_unix.go new file mode 100644 index 00000000000..925fd2fb23b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/syscall_unix.go @@ -0,0 +1,26 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux,!386 netbsd openbsd + +package ipv6 + +import ( + "syscall" + "unsafe" +) + +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { + if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { + return error(errno) + } + return nil +} + +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { + if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/thunk_linux_386.s b/vendor/golang.org/x/net/ipv6/thunk_linux_386.s new file mode 100644 index 00000000000..daa78bc02dd --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/thunk_linux_386.s @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.2 + +TEXT ·socketcall(SB),4,$0-36 + JMP syscall·socketcall(SB) diff --git a/vendor/golang.org/x/net/ipv6/unicast_test.go b/vendor/golang.org/x/net/ipv6/unicast_test.go new file mode 100644 index 00000000000..db5b08a28ca --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicast_test.go @@ -0,0 +1,182 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func TestPacketConnReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + c, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst, err := net.ResolveUDPAddr("udp6", c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + cm.HopLimit = i + 1 + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } +} + +func TestPacketConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", 
"windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst, err := net.ResolveIPAddr("ip6", "::1") + if err != nil { + t.Fatal(err) + } + + pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, dst.IP) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + + var psh []byte + for i, toggle := range []bool{true, false, true} { + if toggle { + psh = nil + if err := p.SetChecksum(true, 2); err != nil { + t.Fatal(err) + } + } else { + psh = pshicmp + // Some platforms never allow to disable the + // kernel checksum processing. + p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + cm.HopLimit = i + 1 + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go new file mode 100644 index 00000000000..7bb2e440ac4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go @@ -0,0 +1,111 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go acceptor(t, ln, done) + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewConn(c)) + + <-done +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp6", "", "[::1]:0"}, + {"ip6", ":ipv6-icmp", "::1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewPacketConn(c)) + } +} + +type testIPv6UnicastConn interface { + TrafficClass() (int, error) + SetTrafficClass(int) error + HopLimit() (int, error) + SetHopLimit(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv6UnicastConn) { + tclass := iana.DiffServCS0 | iana.NotECNTransport + if err := c.SetTrafficClass(tclass); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_TCLASS option + t.Logf("not supported on %s", runtime.GOOS) + goto next + } + t.Fatal(err) + } + if v, err := c.TrafficClass(); err != nil { + t.Fatal(err) + } else if v != tclass { + t.Fatalf("got %v; want %v", v, tclass) + } + +next: + hoplim := 255 + if err := c.SetHopLimit(hoplim); err != nil { + t.Fatal(err) + } + if v, err := c.HopLimit(); err != nil { + t.Fatal(err) + } else if v != hoplim { + t.Fatalf("got %v; want %v", v, hoplim) + } +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go new file mode 100644 index 00000000000..cb044b0336a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + sysIPV6_2292PKTINFO = 0x13 + sysIPV6_2292HOPLIMIT = 0x14 + sysIPV6_2292NEXTHOP = 0x15 + sysIPV6_2292HOPOPTS = 0x16 + sysIPV6_2292DSTOPTS = 0x17 + sysIPV6_2292RTHDR = 0x18 + + sysIPV6_2292PKTOPTIONS = 0x19 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RECVTCLASS = 0x23 + sysIPV6_TCLASS = 0x24 + + sysIPV6_RTHDRDSTOPTS = 0x39 + + sysIPV6_RECVPKTINFO = 0x3d + + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 
0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_MSFILTER = 0x4a + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_BOUND_IF = 0x7d + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go new file mode 100644 index 00000000000..5a03ab7340a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -0,0 +1,90 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +// +build dragonfly + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go 
b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go new file mode 100644 index 00000000000..4ace96f0c5b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -0,0 +1,122 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysSockaddrStorage + Source sysSockaddrStorage +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go new file mode 100644 index 00000000000..4a62c2d5c0c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + 
sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage + Source sysSockaddrStorage +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go new file mode 100644 index 00000000000..4a62c2d5c0c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + 
X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysSockaddrStorage + Source sysSockaddrStorage +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go new file mode 100644 index 00000000000..36fccbb6255 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -0,0 +1,168 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + 
Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go new file mode 100644 index 00000000000..7461e7e03c1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + 
sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go new file mode 100644 index 00000000000..36fccbb6255 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -0,0 +1,168 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + 
sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go new file mode 100644 index 00000000000..ed35f6039a9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,arm64 + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + 
sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go new file mode 100644 index 00000000000..141c86977ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,mips64 + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG 
= 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go new file mode 100644 index 00000000000..d50eb633efa --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,mips64le + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + 
sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go new file mode 100644 index 00000000000..4c58ea67d4d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,ppc + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 
0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x84 + sysSizeofGroupSourceReq = 0x104 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go new file mode 100644 index 00000000000..c1d775f7721 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,ppc64 + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + 
sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go new file mode 100644 index 00000000000..e385fb7aa10 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,ppc64le + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + 
sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go new file mode 100644 index 00000000000..28d69b1b0eb --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ 
-0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +// +build linux,s390x + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sysSizeofKernelSockaddrStorage = 0x80 + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + sysSizeofIPv6FlowlabelReq = 0x20 + + sysSizeofIPv6Mreq = 0x14 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysKernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type sysGroupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage +} + +type sysGroupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sysKernelSockaddrStorage + Source sysKernelSockaddrStorage +} + +type sysICMPv6Filter struct { + Data [8]uint32 +} + +type sysSockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sysSockFilter +} + +type 
sysSockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go new file mode 100644 index 00000000000..d6ec88e39b7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -0,0 +1,84 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go new file mode 100644 index 00000000000..3e080b78aad --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTH_LEVEL = 0x35 + sysIPV6_ESP_TRANS_LEVEL = 0x36 + sysIPV6_ESP_NETWORK_LEVEL = 0x37 + sysIPSEC6_OUTSA = 0x38 + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + sysIPV6_IPCOMP_LEVEL = 0x3c + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + sysIPV6_PIPEX = 0x3f + + sysIPV6_RTABLE = 0x1021 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sysSizeofSockaddrInet6 = 0x1c + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x20 + + sysSizeofIPv6Mreq = 0x14 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrInet6 struct { + 
Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysICMPv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go new file mode 100644 index 00000000000..cdf00c25d91 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -0,0 +1,105 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +// +build solaris + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x5 + sysIPV6_MULTICAST_IF = 0x6 + sysIPV6_MULTICAST_HOPS = 0x7 + sysIPV6_MULTICAST_LOOP = 0x8 + sysIPV6_JOIN_GROUP = 0x9 + sysIPV6_LEAVE_GROUP = 0xa + + sysIPV6_PKTINFO = 0xb + + sysIPV6_HOPLIMIT = 0xc + sysIPV6_NEXTHOP = 0xd + sysIPV6_HOPOPTS = 0xe + sysIPV6_DSTOPTS = 0xf + + sysIPV6_RTHDR = 0x10 + sysIPV6_RTHDRDSTOPTS = 0x11 + + sysIPV6_RECVPKTINFO = 0x12 + sysIPV6_RECVHOPLIMIT = 0x13 + sysIPV6_RECVHOPOPTS = 0x14 + + sysIPV6_RECVRTHDR = 0x16 + + sysIPV6_RECVRTHDRDSTOPTS = 0x17 + + sysIPV6_CHECKSUM = 0x18 + sysIPV6_RECVTCLASS = 0x19 + sysIPV6_USE_MIN_MTU = 0x20 + sysIPV6_DONTFRAG = 0x21 + sysIPV6_SEC_OPT = 0x22 + sysIPV6_SRC_PREFERENCES = 0x23 + sysIPV6_RECVPATHMTU = 0x24 + sysIPV6_PATHMTU = 0x25 + sysIPV6_TCLASS = 0x26 + sysIPV6_V6ONLY = 0x27 + + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_PREFER_SRC_HOME = 0x1 + sysIPV6_PREFER_SRC_COA = 0x2 + sysIPV6_PREFER_SRC_PUBLIC = 0x4 + sysIPV6_PREFER_SRC_TMP = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x10 + sysIPV6_PREFER_SRC_CGA = 0x20 + + sysIPV6_PREFER_SRC_MIPMASK = 0x3 + sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1 + sysIPV6_PREFER_SRC_TMPMASK = 0xc + sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4 + sysIPV6_PREFER_SRC_CGAMASK = 0x30 + sysIPV6_PREFER_SRC_CGADEFAULT = 0x10 + + sysIPV6_PREFER_SRC_MASK = 0x3f + + sysIPV6_PREFER_SRC_DEFAULT = 0x15 + + sysIPV6_BOUND_IF = 0x41 + sysIPV6_UNSPEC_SRC = 0x42 + + sysICMP6_FILTER = 0x1 + + sysSizeofSockaddrInet6 = 0x20 + sysSizeofInet6Pktinfo = 0x14 + sysSizeofIPv6Mtuinfo = 0x24 + + sysSizeofIPv6Mreq = 0x14 + + sysSizeofICMPv6Filter = 0x20 +) + +type sysSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type sysInet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type sysIPv6Mtuinfo struct { + Addr sysSockaddrInet6 + Mtu uint32 +} + +type sysIPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type sysICMPv6Filter struct { + X__icmp6_filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go new file mode 100644 index 00000000000..bd0ec24f444 --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex.go @@ -0,0 +1,312 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httplex contains rules around lexical matters of various +// HTTP-related specifications. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. 
+package httplex + +import ( + "strings" + "unicode/utf8" +) + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func IsTokenRune(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !IsTokenRune(r) +} + +// HeaderValuesContainsToken reports whether any string in values +// contains the provided token, ASCII case-insensitively. +func HeaderValuesContainsToken(values []string, token string) bool { + for _, v := range values { + if headerValueContainsToken(v, token) { + return true + } + } + return false +} + +// isOWS reports whether b is an optional whitespace byte, as defined +// by RFC 7230 section 3.2.3. +func isOWS(b byte) bool { return b == ' ' || b == '\t' } + +// trimOWS returns x with all optional whitespace removes from the +// beginning and end. +func trimOWS(x string) string { + // TODO: consider using strings.Trim(x, " \t") instead, + // if and when it's fast enough. See issue 10292. + // But this ASCII-only code will probably always beat UTF-8 + // aware code. + for len(x) > 0 && isOWS(x[0]) { + x = x[1:] + } + for len(x) > 0 && isOWS(x[len(x)-1]) { + x = x[:len(x)-1] + } + return x +} + +// headerValueContainsToken reports whether v (assumed to be a +// 0#element, in the ABNF extension described in RFC 7230 section 7) +// contains token amongst its comma-separated tokens, ASCII +// case-insensitively. +func headerValueContainsToken(v string, token string) bool { + v = trimOWS(v) + if comma := strings.IndexByte(v, ','); comma != -1 { + return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + } + return tokenEqual(v, token) +} + +// lowerASCII returns the ASCII lowercase version of b. +func lowerASCII(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. +func tokenEqual(t1, t2 string) bool { + if len(t1) != len(t2) { + return false + } + for i, b := range t1 { + if b >= utf8.RuneSelf { + // No UTF-8 or non-ASCII allowed in tokens. 
+ return false + } + if lowerASCII(byte(b)) != lowerASCII(t2[i]) { + return false + } + } + return true +} + +// isLWS reports whether b is linear white space, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// LWS = [CRLF] 1*( SP | HT ) +func isLWS(b byte) bool { return b == ' ' || b == '\t' } + +// isCTL reports whether b is a control byte, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// CTL = +func isCTL(b byte) bool { + const del = 0x7f // a CTL + return b < ' ' || b == del +} + +// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. +// HTTP/2 imposes the additional restriction that uppercase ASCII +// letters are not allowed. +// +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +func ValidHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !IsTokenRune(r) { + return false + } + } + return true +} + +// ValidHostHeader reports whether h is a valid host header. +func ValidHostHeader(h string) bool { + // The latest spec is actually this: + // + // http://tools.ietf.org/html/rfc7230#section-5.4 + // Host = uri-host [ ":" port ] + // + // Where uri-host is: + // http://tools.ietf.org/html/rfc3986#section-3.2.2 + // + // But we're going to be much more lenient for now and just + // search for any byte that's not a valid byte in any of those + // expressions. + for i := 0; i < len(h); i++ { + if !validHostByte[h[i]] { + return false + } + } + return true +} + +// See the validHostHeader comment. +var validHostByte = [256]bool{ + '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, + '8': true, '9': true, + + 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, + 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, + 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, + 'y': true, 'z': true, + + 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, + 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, + 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, + 'Y': true, 'Z': true, + + '!': true, // sub-delims + '$': true, // sub-delims + '%': true, // pct-encoded (and used in IPv6 zones) + '&': true, // sub-delims + '(': true, // sub-delims + ')': true, // sub-delims + '*': true, // sub-delims + '+': true, // sub-delims + ',': true, // sub-delims + '-': true, // unreserved + '.': true, // unreserved + ':': true, // IPv6address + Host expression's optional port + ';': true, // sub-delims + '=': true, // sub-delims + '[': true, + '\'': true, // sub-delims + ']': true, + '_': true, // unreserved + '~': true, // unreserved +} + +// ValidHeaderFieldValue reports whether v is a valid "field-value" according to +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : +// +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = +// +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : +// +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// 
field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex_test.go b/vendor/golang.org/x/net/lex/httplex/httplex_test.go new file mode 100644 index 00000000000..c4ace1991b3 --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex_test.go @@ -0,0 +1,101 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httplex + +import ( + "testing" +) + +func isChar(c rune) bool { return c <= 127 } + +func isCtl(c rune) bool { return c <= 31 || c == 127 } + +func isSeparator(c rune) bool { + switch c { + case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t': + return true + } + return false +} + +func TestIsToken(t *testing.T) { + for i := 0; i <= 130; i++ { + r := rune(i) + expected := isChar(r) && !isCtl(r) && !isSeparator(r) + if IsTokenRune(r) != expected { + t.Errorf("isToken(0x%x) = %v", r, !expected) + } + } +} + +func TestHeaderValuesContainsToken(t *testing.T) { + tests := []struct { + vals []string + token string + want bool + }{ + { + vals: []string{"foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"bar", "foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo"}, + token: "bar", + want: false, + }, + { + vals: []string{" foo "}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar,foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo ,bar "}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar, foo ,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + } + for _, tt := range tests { + got := HeaderValuesContainsToken(tt.vals, tt.token) + if got != tt.want { + t.Errorf("headerValuesContainsToken(%q, %q) = %v; want %v", tt.vals, tt.token, got, tt.want) + } + } +} diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go new file mode 100644 index 00000000000..b317ba2e6a0 --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package netutil provides network utility functions, complementing the more +// common ones in the net package. +package netutil // import "golang.org/x/net/netutil" + +import ( + "net" + "sync" +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. +func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} diff --git a/vendor/golang.org/x/net/netutil/listen_test.go b/vendor/golang.org/x/net/netutil/listen_test.go new file mode 100644 index 00000000000..c1a3d5527ca --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen_test.go @@ -0,0 +1,101 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package netutil + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/internal/nettest" +) + +func TestLimitListener(t *testing.T) { + const max = 5 + attempts := (nettest.MaxOpenFiles() - max) / 2 + if attempts > 256 { // maximum length of accept queue is 128 by default + attempts = 256 + } + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + l = LimitListener(l, max) + + var open int32 + go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if n := atomic.AddInt32(&open, 1); n > max { + t.Errorf("%d open connections, want <= %d", n, max) + } + defer atomic.AddInt32(&open, -1) + time.Sleep(10 * time.Millisecond) + fmt.Fprint(w, "some body") + })) + + var wg sync.WaitGroup + var failed int32 + for i := 0; i < attempts; i++ { + wg.Add(1) + go func() { + defer wg.Done() + c := http.Client{Timeout: 3 * time.Second} + r, err := c.Get("http://" + l.Addr().String()) + if err != nil { + t.Log(err) + atomic.AddInt32(&failed, 1) + return + } + defer r.Body.Close() + io.Copy(ioutil.Discard, r.Body) + }() + } + wg.Wait() + + // We expect some Gets to fail as the kernel's accept queue is filled, + // but most should succeed. + if int(failed) >= attempts/2 { + t.Errorf("%d requests failed within %d attempts", failed, attempts) + } +} + +type errorListener struct { + net.Listener +} + +func (errorListener) Accept() (net.Conn, error) { + return nil, errFake +} + +var errFake = errors.New("fake error from errorListener") + +// This used to hang. +func TestLimitListenerError(t *testing.T) { + donec := make(chan bool, 1) + go func() { + const n = 2 + ll := LimitListener(errorListener{}, n) + for i := 0; i < n+1; i++ { + _, err := ll.Accept() + if err != errFake { + t.Fatalf("Accept error = %v; want errFake", err) + } + } + donec <- true + }() + select { + case <-donec: + case <-time.After(5 * time.Second): + t.Fatal("timeout. 
deadlock?") + } +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 00000000000..4c5ad88b1e7 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" +) + +type direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var Direct = direct{} + +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 00000000000..f540b196f7d --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,140 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the hostname +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone "example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a hostname +// (localhost). A best effort is made to parse the string and errors are +// ignored. 
+func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a hostname that will use the bypass proxy. +func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/per_host_test.go b/vendor/golang.org/x/net/proxy/per_host_test.go new file mode 100644 index 00000000000..a7d80957113 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host_test.go @@ -0,0 +1,55 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "errors" + "net" + "reflect" + "testing" +) + +type recordingProxy struct { + addrs []string +} + +func (r *recordingProxy) Dial(network, addr string) (net.Conn, error) { + r.addrs = append(r.addrs, addr) + return nil, errors.New("recordingProxy") +} + +func TestPerHost(t *testing.T) { + var def, bypass recordingProxy + perHost := NewPerHost(&def, &bypass) + perHost.AddFromString("localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16") + + expectedDef := []string{ + "example.com:123", + "1.2.3.4:123", + "[1001::]:123", + } + expectedBypass := []string{ + "localhost:123", + "zone:123", + "foo.zone:123", + "127.0.0.1:123", + "10.1.2.3:123", + "[1000::]:123", + } + + for _, addr := range expectedDef { + perHost.Dial("tcp", addr) + } + for _, addr := range expectedBypass { + perHost.Dial("tcp", addr) + } + + if !reflect.DeepEqual(expectedDef, def.addrs) { + t.Errorf("Hosts which went to the default proxy didn't match. Got %v, want %v", def.addrs, expectedDef) + } + if !reflect.DeepEqual(expectedBypass, bypass.addrs) { + t.Errorf("Hosts which went to the bypass proxy didn't match. Got %v, want %v", bypass.addrs, expectedBypass) + } +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 00000000000..78a8b7bee97 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,94 @@ +// Copyright 2011 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" +) + +// A Dialer is a means to establish a connection. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func FromEnvironment() Dialer { + allProxy := os.Getenv("all_proxy") + if len(allProxy) == 0 { + return Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return Direct + } + proxy, err := FromURL(proxyURL, Direct) + if err != nil { + return Direct + } + + noProxy := os.Getenv("no_proxy") + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} diff --git a/vendor/golang.org/x/net/proxy/proxy_test.go b/vendor/golang.org/x/net/proxy/proxy_test.go new file mode 100644 index 00000000000..c19a5c06353 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy_test.go @@ -0,0 +1,142 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proxy + +import ( + "io" + "net" + "net/url" + "strconv" + "sync" + "testing" +) + +func TestFromURL(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5Domain, &wg) + + url, err := url.Parse("socks5://user:password@" + gateway.Addr().String()) + if err != nil { + t.Fatalf("url.Parse failed: %v", err) + } + proxy, err := FromURL(url, Direct) + if err != nil { + t.Fatalf("FromURL failed: %v", err) + } + _, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Fatalf("net.SplitHostPort failed: %v", err) + } + if c, err := proxy.Dial("tcp", "localhost:"+port); err != nil { + t.Fatalf("FromURL.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func TestSOCKS5(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5IP4, &wg) + + proxy, err := SOCKS5("tcp", gateway.Addr().String(), nil, Direct) + if err != nil { + t.Fatalf("SOCKS5 failed: %v", err) + } + if c, err := proxy.Dial("tcp", endSystem.Addr().String()); err != nil { + t.Fatalf("SOCKS5.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func socks5Gateway(t *testing.T, gateway, endSystem net.Listener, typ byte, wg *sync.WaitGroup) { + defer wg.Done() + + c, err := gateway.Accept() + if err != nil { + t.Errorf("net.Listener.Accept failed: %v", err) + return + } + defer c.Close() + + b := make([]byte, 32) + var n int + if typ == socks5Domain { + n = 4 + } else { + n = 3 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if _, err := c.Write([]byte{socks5Version, socks5AuthNone}); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } + if typ == socks5Domain { + n = 16 + } else { + n = 10 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if b[0] != socks5Version || b[1] != socks5Connect || b[2] != 0x00 || b[3] != typ { + t.Errorf("got an unexpected packet: %#02x %#02x %#02x %#02x", b[0], b[1], b[2], b[3]) + return + } + if typ == socks5Domain { + copy(b[:5], []byte{socks5Version, 0x00, 0x00, socks5Domain, 9}) + b = append(b, []byte("localhost")...) + } else { + copy(b[:4], []byte{socks5Version, 0x00, 0x00, socks5IP4}) + } + host, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Errorf("net.SplitHostPort failed: %v", err) + return + } + b = append(b, []byte(net.ParseIP(host).To4())...) + p, err := strconv.Atoi(port) + if err != nil { + t.Errorf("strconv.Atoi failed: %v", err) + return + } + b = append(b, []byte{byte(p >> 8), byte(p)}...) 
+ if _, err := c.Write(b); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 00000000000..9b9628239a1 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,210 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "errors" + "io" + "net" + "strconv" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928. +func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) { + s := &socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type socks5 struct { + user, password string + network, addr string + forward Dialer +} + +const socks5Version = 5 + +const ( + socks5AuthNone = 0 + socks5AuthPassword = 2 +) + +const socks5Connect = 1 + +const ( + socks5IP4 = 1 + socks5Domain = 3 + socks5IP6 = 4 +) + +var socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the network net via the SOCKS5 proxy. +func (s *socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + closeConn := &conn + defer func() { + if closeConn != nil { + (*closeConn).Close() + } + }() + + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return nil, errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return nil, errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + if buf[1] == socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
+ + if _, err := conn.Write(buf); err != nil { + return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, socks5IP4) + ip = ip4 + } else { + buf = append(buf, socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return nil, errors.New("proxy: destination hostname too long: " + host) + } + buf = append(buf, socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(socks5Errors) { + failure = socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case socks5IP4: + bytesToDiscard = net.IPv4len + case socks5IP6: + bytesToDiscard = net.IPv6len + case socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return nil, errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + closeConn = nil + return conn, nil +} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go new file mode 100644 index 00000000000..a2d4995292f --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/gen.go @@ -0,0 +1,713 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package main + +// This program generates table.go and table_test.go based on the authoritative +// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat +// +// The version is derived from +// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat +// and a human-readable form is at +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat +// +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" +// and -version "an explicit version string". + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // These sum of these four values must be no greater than 32. + nodesBitsChildren = 9 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + // These sum of these four values must be no greater than 32. + childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +var ( + maxChildren int + maxTextOffset int + maxTextLength int + maxHi uint32 + maxLo uint32 +) + +func max(a, b int) int { + if a < b { + return b + } + return a +} + +func u32max(a, b uint32) uint32 { + if a < b { + return b + } + return a +} + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 + numNodeType = 3 +) + +func nodeTypeStr(n int) string { + switch n { + case nodeTypeNormal: + return "+" + case nodeTypeException: + return "!" + case nodeTypeParentOnly: + return "o" + } + panic("unreachable") +} + +const ( + defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" + gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" +) + +var ( + labelEncoding = map[string]uint32{} + labelsList = []string{} + labelsMap = map[string]bool{} + rules = []string{} + + // validSuffixRE is used to check that the entries in the public suffix + // list are in canonical form (after Punycode encoding). Specifically, + // capital letters are not allowed. + validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) + + shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) + dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) + + comments = flag.Bool("comments", false, "generate table.go comments, for debugging") + subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") + url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. 
If empty, stdin is read instead") + v = flag.Bool("v", false, "verbose output (to stderr)") + version = flag.String("version", "", "the effective_tld_names.dat version") +) + +func main() { + if err := main1(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main1() error { + flag.Parse() + if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { + return fmt.Errorf("not enough bits to encode the nodes table") + } + if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { + return fmt.Errorf("not enough bits to encode the children table") + } + if *version == "" { + if *url != defaultURL { + return fmt.Errorf("-version was not specified, and the -url is not the default one") + } + sha, date, err := gitCommit() + if err != nil { + return err + } + *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) + } + var r io.Reader = os.Stdin + if *url != "" { + res, err := http.Get(*url) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) + } + r = res.Body + defer res.Body.Close() + } + + var root node + icann := false + br := bufio.NewReader(r) + for { + s, err := br.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return err + } + s = strings.TrimSpace(s) + if strings.Contains(s, "BEGIN ICANN DOMAINS") { + icann = true + continue + } + if strings.Contains(s, "END ICANN DOMAINS") { + icann = false + continue + } + if s == "" || strings.HasPrefix(s, "//") { + continue + } + s, err = idna.ToASCII(s) + if err != nil { + return err + } + if !validSuffixRE.MatchString(s) { + return fmt.Errorf("bad publicsuffix.org list data: %q", s) + } + + if *subset { + switch { + case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): + case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): + case s == "ao" || strings.HasSuffix(s, ".ao"): + case s == "ar" || strings.HasSuffix(s, ".ar"): + case s == "arpa" || strings.HasSuffix(s, ".arpa"): + case s == "cy" || strings.HasSuffix(s, ".cy"): + case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): + case s == "jp": + case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): + case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): + case s == "om" || strings.HasSuffix(s, ".om"): + case s == "uk" || strings.HasSuffix(s, ".uk"): + case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): + case s == "tw" || strings.HasSuffix(s, ".tw"): + case s == "zw" || strings.HasSuffix(s, ".zw"): + case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): + // xn--p1ai is Russian-Cyrillic "рф". 
+ default: + continue + } + } + + rules = append(rules, s) + + nt, wildcard := nodeTypeNormal, false + switch { + case strings.HasPrefix(s, "*."): + s, nt = s[2:], nodeTypeParentOnly + wildcard = true + case strings.HasPrefix(s, "!"): + s, nt = s[1:], nodeTypeException + } + labels := strings.Split(s, ".") + for n, i := &root, len(labels)-1; i >= 0; i-- { + label := labels[i] + n = n.child(label) + if i == 0 { + if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { + n.nodeType = nt + } + n.icann = n.icann && icann + n.wildcard = n.wildcard || wildcard + } + labelsMap[label] = true + } + } + labelsList = make([]string, 0, len(labelsMap)) + for label := range labelsMap { + labelsList = append(labelsList, label) + } + sort.Strings(labelsList) + + if err := generate(printReal, &root, "table.go"); err != nil { + return err + } + if err := generate(printTest, &root, "table_test.go"); err != nil { + return err + } + return nil +} + +func generate(p func(io.Writer, *node) error, root *node, filename string) error { + buf := new(bytes.Buffer) + if err := p(buf, root); err != nil { + return err + } + b, err := format.Source(buf.Bytes()) + if err != nil { + return err + } + return ioutil.WriteFile(filename, b, 0644) +} + +func gitCommit() (sha, date string, retErr error) { + res, err := http.Get(gitCommitURL) + if err != nil { + return "", "", err + } + if res.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) + } + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if m := shaRE.FindSubmatch(b); m != nil { + sha = string(m[1]) + } + if m := dateRE.FindSubmatch(b); m != nil { + date = string(m[1]) + } + if sha == "" || date == "" { + retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) + } + return sha, date, retErr +} + +func printTest(w io.Writer, n *node) error { + fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") + fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") + for _, rule := range rules { + fmt.Fprintf(w, "%q,\n", rule) + } + fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") + if err := n.walk(w, printNodeLabel); err != nil { + return err + } + fmt.Fprintf(w, "}\n") + return nil +} + +func printReal(w io.Writer, n *node) error { + const header = `// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = %q + +const ( + nodesBitsChildren = %d + nodesBitsICANN = %d + nodesBitsTextOffset = %d + nodesBitsTextLength = %d + + childrenBitsWildcard = %d + childrenBitsNodeType = %d + childrenBitsHi = %d + childrenBitsLo = %d +) + +const ( + nodeTypeNormal = %d + nodeTypeException = %d + nodeTypeParentOnly = %d +) + +// numTLD is the number of top level domains. 
+const numTLD = %d + +` + fmt.Fprintf(w, header, *version, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, + nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) + + text := combineText(labelsList) + if text == "" { + return fmt.Errorf("internal error: makeText returned no text") + } + for _, label := range labelsList { + offset, length := strings.Index(text, label), len(label) + if offset < 0 { + return fmt.Errorf("internal error: could not find %q in text %q", label, text) + } + maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) + if offset >= 1<= 1< 64 { + n, plus = 64, " +" + } + fmt.Fprintf(w, "%q%s\n", text[:n], plus) + text = text[n:] + } + + if err := n.walk(w, assignIndexes); err != nil { + return err + } + + fmt.Fprintf(w, ` + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] children index +// [%2d bits] ICANN bit +// [%2d bits] text index +// [%2d bits] text length +var nodes = [...]uint32{ +`, + 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) + if err := n.walk(w, printNode); err != nil { + return err + } + fmt.Fprintf(w, `} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] wildcard bit +// [%2d bits] node type +// [%2d bits] high nodes index (exclusive) of children +// [%2d bits] low nodes index (inclusive) of children +var children=[...]uint32{ +`, + 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) + for i, c := range childrenEncoding { + s := "---------------" + lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 + if *comments { + fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", + c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + } else { + fmt.Fprintf(w, "0x%x,\n", c) + } + } + fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { + ss = ss[1:] + } + return ss +} + +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. 
+func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) + for i, s := range ss { + if len(s) <= prefixLen { + continue + } + mergeLabel(ss, i, prefixLen, prefixes) + } + } + + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { + continue + } + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. + // Can there be matches for longer prefix lengths, introduced by the merge? + // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return + } +} + +// prefixMap maps from a prefix to a list of strings containing that prefix. The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. + if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } + } + + return prefixes +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 00000000000..8bbf3bcd7ef --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,135 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// http://publicsuffix.org/. A public suffix is one under which Internet users +// can directly register names. +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. 
+ +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is privately +// managed. For example, foo.org and foo.co.uk are ICANN domains, +// foo.dyndns.org and foo.blogspot.co.uk are private domains. +// +// Use cases for distinguishing ICANN domains like foo.com from private +// domains like foo.appspot.com can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, wildcard := domain, len(domain), false +loop: + for { + dot := strings.LastIndex(s, ".") + if wildcard { + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) + icann = u&(1<<nodesBitsICANN-1) != 0 + u >>= nodesBitsICANN + u = children[u&(1<<nodesBitsChildren-1)] + lo = u & (1<<childrenBitsLo - 1) + u >>= childrenBitsLo + hi = u & (1<<childrenBitsHi - 1) + u >>= childrenBitsHi + switch u & (1<<childrenBitsNodeType - 1) { + case nodeTypeNormal: + suffix = 1 + dot + case nodeTypeException: + suffix = 1 + len(s) + break loop + } + u >>= childrenBitsNodeType + wildcard = u&(1<>= nodesBitsTextLength + offset := x & (1< len(b[j]) +} + +// eTLDPlusOneTestCases come from +// https://github.com/publicsuffix/list/blob/master/tests/test_psl.txt +var eTLDPlusOneTestCases = []struct { + domain, want string +}{ + // Empty input. + {"", ""}, + // Unlisted TLD. + {"example", ""}, + {"example.example", "example.example"}, + {"b.example.example", "example.example"}, + {"a.b.example.example", "example.example"}, + // TLD with only 1 rule. + {"biz", ""}, + {"domain.biz", "domain.biz"}, + {"b.domain.biz", "domain.biz"}, + {"a.b.domain.biz", "domain.biz"}, + // TLD with some 2-level rules. + {"com", ""}, + {"example.com", "example.com"}, + {"b.example.com", "example.com"}, + {"a.b.example.com", "example.com"}, + {"uk.com", ""}, + {"example.uk.com", "example.uk.com"}, + {"b.example.uk.com", "example.uk.com"}, + {"a.b.example.uk.com", "example.uk.com"}, + {"test.ac", "test.ac"}, + // TLD with only 1 (wildcard) rule. + {"mm", ""}, + {"c.mm", ""}, + {"b.c.mm", "b.c.mm"}, + {"a.b.c.mm", "b.c.mm"}, + // More complex TLD. + {"jp", ""}, + {"test.jp", "test.jp"}, + {"www.test.jp", "test.jp"}, + {"ac.jp", ""}, + {"test.ac.jp", "test.ac.jp"}, + {"www.test.ac.jp", "test.ac.jp"}, + {"kyoto.jp", ""}, + {"test.kyoto.jp", "test.kyoto.jp"}, + {"ide.kyoto.jp", ""}, + {"b.ide.kyoto.jp", "b.ide.kyoto.jp"}, + {"a.b.ide.kyoto.jp", "b.ide.kyoto.jp"}, + {"c.kobe.jp", ""}, + {"b.c.kobe.jp", "b.c.kobe.jp"}, + {"a.b.c.kobe.jp", "b.c.kobe.jp"}, + {"city.kobe.jp", "city.kobe.jp"}, + {"www.city.kobe.jp", "city.kobe.jp"}, + // TLD with a wildcard rule and exceptions. + {"ck", ""}, + {"test.ck", ""}, + {"b.test.ck", "b.test.ck"}, + {"a.b.test.ck", "b.test.ck"}, + {"www.ck", "www.ck"}, + {"www.www.ck", "www.ck"}, + // US K12.
+ {"us", ""}, + {"test.us", "test.us"}, + {"www.test.us", "test.us"}, + {"ak.us", ""}, + {"test.ak.us", "test.ak.us"}, + {"www.test.ak.us", "test.ak.us"}, + {"k12.ak.us", ""}, + {"test.k12.ak.us", "test.k12.ak.us"}, + {"www.test.k12.ak.us", "test.k12.ak.us"}, + // Punycoded IDN labels + {"xn--85x722f.com.cn", "xn--85x722f.com.cn"}, + {"xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"www.xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"shishi.xn--55qx5d.cn", "shishi.xn--55qx5d.cn"}, + {"xn--55qx5d.cn", ""}, + {"xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"www.xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"shishi.xn--fiqs8s", "shishi.xn--fiqs8s"}, + {"xn--fiqs8s", ""}, +} + +func TestEffectiveTLDPlusOne(t *testing.T) { + for _, tc := range eTLDPlusOneTestCases { + got, _ := EffectiveTLDPlusOne(tc.domain) + if got != tc.want { + t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want) + } + } +} diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go new file mode 100644 index 00000000000..00fa1ef5d42 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table.go @@ -0,0 +1,8973 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = "publicsuffix.org's public_suffix_list.dat, git revision fb4a6bce72a86feaf6c38f0a43cd05baf97a9258 (2016-07-07T00:50:50Z)" + +const ( + nodesBitsChildren = 9 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 +) + +// numTLD is the number of top level domains. +const numTLD = 1552 + +// Text is the combined text of all labels. 
+const text = "biellaakesvuemieleccebieszczadygeyachimataipeigersundrangedalivo" + + "rnoddabievatmallorcafederationikonantanangerbifukagawalmartatesh" + + "inanomachintaijinfolldalomzansimagicasadelamonedatsunanjoetsuwan" + + "ouchikujogaszkoladbrokesamsclubindalorenskogliwicebihorologyusui" + + "sserveexchangebikedagestangeorgeorgiabilbaogakievenesamsunglobal" + + "ashovhachinohedmarkhangelskatowicebillustrationinohekinannestadr" + + "ivelandrobaknoluoktainaikawachinaganoharamcoalaheadjudaicable-mo" + + "dembetsukuintuitateyamabiomutashinainuyamanouchikuhokuryugasakit" + + "ashiobarabirdartcenterprisesakikonaircraftraeumtgeradealstahauge" + + "sundunloppacificaseihichisobetsuitairabirkenesoddtangenovaravenn" + + "agatorockartuzyuudmurtiabirthplacebjarkoyuzawabjerkreimdbalatino" + + "rdkappgafanpachigasakidsmynasperschlesisches3-sa-east-1bjugniezn" + + "ordre-landunsandvikcoromantovalle-d-aostatoilotenkawablockbuster" + + "nidupontariobloombergbauernrtatsunobloxcmsanfranciscofreakunemur" + + "orangeiseiyoichiropracticasertaishinomakikuchikuseikarugaulardal" + + "ottebluedaplierneuesangobmoattachmentsanjotattoolsztynsettlersan" + + "naninomiyakonojoshkar-olayangroupaleobmsannohelplfinancialottoko" + + "namegatakatorinvestmentsanokatsushikabeeldengeluidurbanamexhibit" + + "ionirasakis-a-candidatebmweirbnpparibaselburglobodoes-itverranza" + + "nquannefrankfurtaxihuanishiazais-a-catererbomloansantabarbarabon" + + "durhamburglogowfarmsteadvrcambridgestonewspaperbonnishigotsukiso" + + "fukushimaritimodenakanojohanamakinoharabookingloppenzaogashimada" + + "chicagoboatsantacruzsantafedextraspace-to-rentalstomakomaibarabo" + + "otsanukis-a-celticsfanishiharaboschaefflerdalouvreitgoryuzhno-sa" + + "khalinskatsuyamaseratis-a-chefarsundvrdnsfor-better-thandabostik" + + "aufenishiizunazukis-a-conservativefsncfdwgmbhartiffanybostonakij" + + "insekikogentingminakamichiharabotanicalgardenishikatakazakis-a-c" + + "padoval-daostavalleybotanicgardenishikatsuragithubusercontentjel" + + "dsundyndns-ipalermomasvuotnakatombetsupplybotanybouncemerckautok" + + "einobounty-fullensakerrypropertiesaotomeloyalistockholmestrandyn" + + "dns-mailowiczest-le-patrondheimperiaboutiquebecngmodellingmxfini" + + "tybozentsujiiebradescorporationishikawazukanazawabrandywinevalle" + + "ybrasiliabresciabrindisibenikebristolgapartmentsapodhalewismille" + + "rbritishcolumbialowiezaganishimerabroadcastleclercasinore-og-uvd" + + "alucaniabroadwaybroke-itjmaxxxjaworznobrokerbronnoysundyndns-off" + + "ice-on-the-webcampobassociatesapporobrothermesaverdeatnuorogersv" + + "palmspringsakerbrumunddaluccapitalonewhollandyndns-picsaratovall" + + "eaostavernishinomiyashironobrunelblagdenesnaaseralingenkainanaej" + + "rietisalatinabenoboribetsucksardegnamsosnowiecateringebudejjuedi" + + "schesapeakebayernurembergrimstadyndns-remotegildeskalmykiabrusse" + + "lsardiniabruxellesarlucernebryanskjervoyagebryneustarhubalestran" + + "dabergamoarekemrbuskerudinewhampshirechtrainingripebuzenishinoom" + + "otegotvalled-aostavropolitiendabuzzgorzeleccolognewmexicoldwarmi" + + "amiastaplesarpsborgriwataraidyndns-servercellikes-piedmontblanco" + + "meeresarufutsunomiyawakasaikaitakoenigrondarbwhalingrongabzhitom" + + "irkutskleppamperedchefashionishinoshimatta-varjjatjometlifeinsur" + + "ancecomputerhistoryofscience-fictioncomsecuritytacticsavonamssko" + + "ganeis-a-designerimarumorimachidacondoshichinohealthcareersaxoco" + + "nferenceconstructionconsuladoharuhrconsultanthropologyconsulting" + + 
"volluzerncontactoyosatoyokawacontemporaryarteducationalchikugojo" + + "medio-campidano-mediocampidanomediocontractorskenconventureshino" + + "desashibetsuikimobetsuliguriacookingchannelveruminamibosogndalvi" + + "vano-frankivskfhappoumuenchencoolkuszgradcooperaunitemasekhabaro" + + "vskhakassiacopenhagencyclopedichernihivanovosibirskydivingrosset" + + "ouchijiwadeloittevadsoccertificationissandnessjoenissayokoshibah" + + "ikariwanumataketomisatomobellevuelosangelesjaguarchitecturealtyc" + + "hyattorneyagawalbrzycharternopilawalesundyndns-wikinderoycorsica" + + "hcesuolocalhistorybnikahokutoeiheijis-a-doctoraycorvettenrightat" + + "homegoodsbschokoladencosenzakopanerairguardcostumedizinhistorisc" + + "hescholarshipschoolcouchpotatofrieschulezajskharkivgucciprianiig" + + "ataiwanairforcertmgretachikawakuyabukicks-assedichernivtsiciliac" + + "ouncilcouponschwarzgwangjuifminamidaitomangotembaixadacourseschw" + + "eizippodlasiellakasamatsudovre-eikercq-acranbrookuwanalyticscien" + + "cecentersciencehistorycreditcardcreditunioncremonashorokanaiecre" + + "wildlifedjejuegoshikiminokamoenairlinedre-eikercricketrzyncrimea" + + "crotonewportlligatewaycrownprovidercrscientistor-elvdalcruisescj" + + "ohnsoncryptonomichigangwoncuisinellahppiacenzamamibuilderscotlan" + + "dculturalcentertainmentoyotaris-a-financialadvisor-aurdalcuneocu" + + "pcakecxn--1ctwolominamatambovalledaostamayukis-a-geekgalaxycymru" + + "ovatoyotomiyazakis-a-greencyonabarussiacyouthdfcbankzjcbnlfieldf" + + "iguerestaurantoyotsukaidownloadfilateliafilminamiechizenfinalfin" + + "ancefineartscrappinguovdageaidnulsandoyfinlandfinnoyfirebaseappa" + + "raglidingushikamifuranoshiroomurafirenzefirestonextdirectoyouraf" + + "irmdaleirfjordfishingolffanserveftparisor-fronfitjarqhachiojiyah" + + "ikobeatservegame-serverisignfitnessettlementoystre-slidrettozawa" + + "fjalerflesbergxn--1lqs71dflickragerotikamakurazakinkobayashiksha" + + "cknetnedalflightservehalflifestyleflirumannortonsbergzlgfloginto" + + "gurafloraflorencefloridafloristanohatakaharulvikhmelnitskiyamasf" + + "jordenfloromskoguchikuzenflowerservehttparliamentozsdeflsmidthru" + + "heredstonexus-east-1flynnhubalsfjordishakotankarumaifarmerseinew" + + "yorkshirecreationaturbruksgymnaturhistorisches3-us-gov-west-1fnd" + + "foodnetworkshoppingfor-ourfor-someetranbyfor-theaterforexrothach" + + "irogatakamoriokamikitayamatotakadaforgotdnservehumourforli-cesen" + + "a-forlicesenaforlikescandyndns-at-workinggrouparmaforsaleirvikhm" + + "elnytskyivalleeaosteigenforsandasuoloftrani-andria-barletta-tran" + + "i-andriafortmissoulan-udefenseljordfortworthadanotaireserveirche" + + "rnovtsykkylvenetogakushimotoganewjerseyforuminamifuranofosneserv" + + "eminecraftraniandriabarlettatraniandriafotaruis-a-gurunzenfoxfor" + + "degreefreeboxostrowiechiryukyuragifudaigodoesntexistanbullensvan" + + "guardyndns-workisboringroundhandlingroznyfreemasonryfreiburgfrei" + + "ghtcmwilliamhillfreseniuscountryestateofdelawaredumbrellajollame" + + "ricanexpressexyzparocherkasyzrankoshigayaltaikis-a-hard-workerfr" + + "ibourgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venez" + + "ia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriuliv" + + "e-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafri" + + "ulivgiuliafrlfroganservemp3utilitiesquarezzoologicalvinklein-add" + + "rammenuernbergdyniabcn-north-1kappleaseating-organicbcg12000emma" + + "fanconagawakayamadridvagsoyericsson-aptibleangaviikadenaamesjevu" + + "emielno-ip6frognfrolandfrom-akrehamnfrom-alfrom-arfrom-azwinbalt" + + 
"imore-og-romsdalimitedunetbankasaokamisatokamachippubetsubetsuga" + + "ruconnectarumizusawaukraanghkebinagisochildrensgardenasushiobara" + + "bruzzoologyeongbuk-uralsk12from-capetownnews-stagingfrom-collect" + + "ionfrom-ctranoyfrom-dchitachinakagawassamukawataricohdavvenjarga" + + "usdalukowhoswhokksundynnsasayamafrom-dell-ogliastrakhanawatchese" + + "rvep2parservepicservequakefrom-flanderservesarcasmatartanddesign" + + "from-gafrom-higashiagatsumagoirminamiiselectransportrapaniimimat" + + "akatsukis-a-hunterfrom-iafrom-idfrom-ilfrom-incheonfrom-kservice" + + "settsurfastlyfrom-kyotobetsumidatlantichitosetogitsuldaluroyfrom" + + "-lanbibaidarfrom-mansionsevastopolefrom-mdfrom-megurorostrowwlkp" + + "mgfrom-microsoftbankhvanylvenicefrom-mnfrom-mochizukirafrom-msev" + + "enassisicilyfrom-mtnfrom-nchloefrom-ndfrom-nefrom-nhktravelchann" + + "elfrom-njcpartis-a-knightravelersinsurancefrom-nminamiizukamiton" + + "dabayashiogamagoriziafrom-nvaolbia-tempio-olbiatempioolbialystok" + + "kemerovodkagoshimaizurubtsovskjakdnepropetrovskiervaapsteiermark" + + "labudhabikinokawabarthadselfipartnersewindmillfrom-nyfrom-ohkura" + + "from-oketohmanxn--1qqw23afrom-orfrom-paderbornfrom-pratohnoshooo" + + "shikamaishimofusartsfranziskanerdpolicefrom-rivnefrom-schoenbrun" + + "nfrom-sdnipropetrovskypescaravantaafrom-tnfrom-txn--2m4a15efrom-" + + "utazuerichardlillehammerfest-mon-blogueurovisionfrom-vaksdalfrom" + + "-vtrdfrom-wafrom-wielunnerfrom-wvareserveblogspotrentino-a-adige" + + "from-wyfrosinonefrostalowa-wolawafroyahabaghdadultrentino-aadige" + + "fstcgroupartshangrilangevagrarboretumbriamallamagentositelefonic" + + "aaarborteaches-yogasawaracingroks-theatreefujiiderafujikawaguchi" + + "konefujiminohtawaramotoineppugliafujinomiyadafujiokayamaoris-a-l" + + "andscaperugiafujisatoshonairportland-4-salernogatagajobojis-a-la" + + "wyerfujisawafujishiroishidakabiratoridellogliastraderfujitsuruga" + + "shimamateramodalenfujixeroxn--30rr7yfujiyoshidafukayabeardubaidu" + + "ckdnsdojoburgfukuchiyamadafukudominichocolatelevisionissedalutsk" + + "azimierz-dolnyfukuis-a-liberalfukumitsubishigakirkenesharis-a-li" + + "bertarianfukuokazakirovogradoyfukuroishikarikaturindalfukusakiry" + + "uohaebaruminamimakis-a-linux-useranishiaritabashikaoizumizakitau" + + "rayasudafukuyamagatakahashimamakisarazurewebsiteshikagamiishibuk" + + "awafunabashiriuchinadafunagatakahatakaishimoichinosekigaharafuna" + + "hashikamiamakusatsumasendaisennangonohejis-a-llamarylandfundacio" + + "fuoiskujukuriyamarburgfuosskoczowindowsharpartyfurnitureggio-cal" + + "abriafurubiraquarellebesbyglandfurudonostiafurukawairtelecityeat" + + "shawaiijimarugame-hostingfusodegaurafussaikishiwadafutabayamaguc" + + "hinomigawafutboldlygoingnowhere-for-moregontrailroadfuttsurugimi" + + "namiminowafvgfyis-a-musicianfylkesbiblackfridayfyresdalhannovarg" + + "gatrentino-alto-adigehanyuzenhapmirhareidsbergenharstadharvestce" + + "lebrationhasamarahasaminami-alpssells-itrentino-altoadigehashban" + + "ghasudahasura-appassagenshimokitayamahasvikmshimonitayanagivestb" + + "ytomaritimekeepinghatogayahoohatoyamazakitahatakanabeautydalhats" + + "ukaichikaiseis-a-painteractivegarsheis-a-patsfanhattfjelldalhaya" + + "shimamotobuildinghazuminobusellsyourhomeipassenger-associationhb" + + "oehringerikehelsinkitahiroshimarriottrentino-s-tirollagrigentomo" + + "logyhembygdsforbundhemneshimonosekikawahemsedalhepforgeherokussl" + + "dheroyhgtvaroyhigashichichibungotakadatinghigashihiroshimanehiga" + + "shiizumozakitakamiizumisanofidelityumenhigashikagawahigashikagur" + + 
"asoedahigashikawakitaaikitakatakanezawahigashikurumeiwamarshalls" + + "tatebankokonoehigashimatsushimarinehigashimatsuyamakitaakitadait" + + "oigawahigashimurayamalatvuopmidoris-a-personaltrainerhigashinaru" + + "sembokukitakyushuaiahigashinehigashiomihachimanchesterhigashiosa" + + "kasayamamotorcycleshimosuwalkis-a-photographerokuappaviancarboni" + + "a-iglesias-carboniaiglesiascarboniahigashishirakawamatakaokamiko" + + "aniikappulawyhigashisumiyoshikawaminamiaikitamidsundhigashitsuno" + + "tteroyhigashiurausukitamotosumitakaginankokubunjis-a-playerhigas" + + "hiyamatokoriyamanakakogawahigashiyodogawahigashiyoshinogaris-a-r" + + "epublicancerresearchaeologicaliforniahiraizumisatohobby-sitehira" + + "katashinagawahiranairtraffichonanbugattipschmidtre-gauldaluxuryh" + + "irarahiratsukagawahirayaitakarazukamiminershimotsukehistorichous" + + "eshimotsumahitachiomiyaginowaniihamatamakawajimarcheapfizerhitac" + + "hiotagooglecodespotrentino-stirolhitoyoshimifunehitradinghjartda" + + "lhjelmelandholeckobierzyceholidayhomelinuxn--32vp30hagebostadhom" + + "esecuritymaceratakasagopocznosegawahomesecuritypccwinnershinichi" + + "nanhomesenseminehomeunixn--3bst00minamiogunicomcastresistancehon" + + "dahonefosshinjournalismailillesandefjordhoneywellhongorgehonjyoi" + + "takasakitanakagusukumoduminamisanrikubetsupplieshinjukumanohorni" + + "ndalhorseoulminamitanehortendofinternetrentino-sud-tirolhotelesh" + + "inkamigotoyohashimototalhotmailhoyangerhoylandetroitskolobrzeger" + + "sundhumanitieshinshinotsurgeonshalloffamemergencyberlevagangavii" + + "kanonjis-a-rockstarachowicehurdalhurumajis-a-socialistmeindianap" + + "olis-a-bloggerhyllestadhyogoris-a-soxfanhyugawarahyundaiwafunehz" + + "choseirouterjgorajlchoyodobashichikashukujitawarajlljmpgfoggiajn" + + "jelenia-gorajoyokaichibahcavuotnagaraholtaleniwaizumiotsukumiyam" + + "azonawsadodgemologicallyngenvironmentalconservationjpmorganjpnch" + + "ristmasakikugawatchandclockazojprshioyamemorialjuniperjurkristia" + + "nsundkrodsheradkrokstadelvaldaostarostwodzislawioshirakofuelkrym" + + "inamiyamashirokawanabelgorodeokumatorinokumejimassa-carrara-mass" + + "acarraramassabunkyonanaoshimageandsoundandvisionkumenanyokkaichi" + + "rurgiens-dentistes-en-francekunisakis-an-anarchistoricalsocietyk" + + "unitachiarailwaykunitomigusukumamotoyamasoykunneppupharmacyshira" + + "nukaniepcekunstsammlungkunstunddesignkuokgrouphiladelphiaareadmy" + + "blogsitekureisenkurgankurobelaudibleborkdalvdalaskanittedallasal" + + "leasingleshiraois-an-artisteinkjerusalembroiderykurogimilitaryku" + + "roisoftwarendalenugkuromatsunais-an-engineeringkurotakikawasakis" + + "-an-entertainerkurskomitamamurakushirogawakustanais-bykusuperspo" + + "rtrentinoaadigekutchanelkutnokuzbassnillfjordkuzumakis-certified" + + "ekakudamatsuekvafjordkvalsundkvamfamberkeleykvanangenkvinesdalkv" + + "innheradkviteseidskogkvitsoykwpspiegelkyowariasahikawamitourismo" + + "lanciamitoyoakemiuramiyazustkarasjokommunemiyotamanomjondalenmlb" + + "fanmonmouthaibarakisosakitagawamonstermonticellombardiamondshira" + + "okanmakiwakunigamihamadamontrealestatefarmequipmentrentinoalto-a" + + "digemonza-brianzaporizhzheguris-into-animelbournemonza-e-della-b" + + "rianzaporizhzhiamonzabrianzapposhiratakahagivingmonzaebrianzapto" + + "kuyamatsunomonzaedellabrianzaramoparachutingmordoviajessheiminan" + + "omoriyamatsusakahoginozawaonsenmoriyoshiokamitsuemormoneymoroyam" + + "atsushigemortgagemoscowitdkomonomoseushistorymosjoenmoskeneshish" + + "ikuis-into-carshintomikasaharamosshisognemosvikomorotsukamisunag" + + 
"awamoviemovistargardmtpchromedicaltanissettaitogliattiresaskatch" + + "ewanggouvicenzamtranakatsugawamuenstermugithubcloudusercontentre" + + "ntinoaltoadigemuikamogawamukochikushinonsenergymulhouservebeermu" + + "ltichoicemunakatanemuncieszynmuosattemuphilatelymurmanskomvuxn--" + + "3ds443gmurotorcraftrentinos-tirolmusashimurayamatsuuramusashinoh" + + "aramuseetrentinostirolmuseumverenigingmutsuzawamutuellevangermyd" + + "robofagemydshisuifuettertdasnetzmyeffectrentinosud-tirolmyfritzm" + + "yftphilipsymykolaivbarcelonagasakijobserverdalimoliserniaurskog-" + + "holandroverhalla-speziaeroportalabamagasakishimabarackmaze12myme" + + "diapchryslermyokohamamatsudamypepsonyoursidedyn-o-saurecipesaro-" + + "urbino-pesarourbinopesaromalvikongsbergmypetshitaramamyphotoshib" + + "ahccavuotnagareyamakeupowiathletajimabariakepnord-odalpharmacien" + + "snasaarlandmypsxn--3e0b707emysecuritycamerakermyshopblockshizuku" + + "ishimogosenmytis-a-bookkeepermincommbankommunalforbundmyvnchungb" + + "ukazunopictureshizuokannamiharupiemontepilotshoujis-into-cartoon" + + "shinyoshitomiokaneyamaxunusualpersonpimientakinouepinkongsvinger" + + "pioneerpippupiszpittsburghofauskedsmokorsetagayasells-for-ufcfan" + + "piwatepizzapkoninjamisonplanetariuminnesotaketakayamatsumaebashi" + + "modateplantationplantshowaplatformintelligenceplaystationplazapl" + + "chungnamdalseidfjordynv6plombardyndns-blogdnsiskinkyknethnologyp" + + "lumbingovtrentinosued-tirolplusterpmnpodzonepohlpointtomskonskow" + + "olancashireggioemiliaromagnakasatsunais-a-techietis-a-studentalp" + + "oivronpokerpokrovskonsulatrobeepilepsydneypolkowicepoltavalle-ao" + + "stathellexusdecorativeartshowtimeteorapphotographysiopomorzeszow" + + "ithgoogleapisa-hockeynutrentinosuedtirolpordenonepornporsangerpo" + + "rsanguideltajimicrolightingporsgrunnanpoznanpraxis-a-bruinsfanpr" + + "dpreservationpresidioprgmrprimelhusgardenprincipeprivatizehealth" + + "insuranceprochowiceproductionshriramlidlugolekagaminogiessenebak" + + "keshibechambagriculturennebudapest-a-la-masionthewifiat-band-cam" + + "paniaprofbsbxn--1lqs03nprogressivegaskimitsubatamicadaquesienapl" + + "esigdalprojectrentoyonakagyokutoyakokamishihoronobeokaminoyamats" + + "uris-into-gamessinashikitchenpromombetsupportrevisohughesilkonyv" + + "elolpropertyprotectionprudentialpruszkowithyoutubeneventodayprze" + + "worskogptzpvtroandinosaurlandesimbirskooris-a-therapistoiapwchur" + + "chaseljeepostfoldnavyatkakamigaharapzqldqponqslgbtrogstadquicksy" + + "tesimple-urlqvchuvashiaspreadbettingspydebergsrlsrtromsojavald-a" + + "ostarnbergsrvdonskoseis-an-accountantshinshirostoragestordalstor" + + "enburgstorfjordstpetersburgstreamsterdamnserverbaniastudiostudyn" + + "dns-homeftpaccesslingstuff-4-salestufftoread-booksneslupskopervi" + + "komatsushimashikestuttgartrusteesurnadalsurreysusakis-not-certif" + + "iedogawarabikomaezakirunorthwesternmutualsusonosuzakanrasuzukanu" + + "mazurysuzukis-saveducatorahimeshimakanegasakindleikangersvalbard" + + "udinkakegawasveiosvelvikosherbrookegawasvizzeraswedenswidnicargo" + + "daddyndns-at-homednshomebuiltrvenneslaskerrylogisticsmolenskoryo" + + "lasiteswiebodzindianmarketingswiftcoveronaritakurashikis-slickom" + + "aganeswinoujscienceandhistoryswisshikis-uberleetrentino-sued-tir" + + "olvestnesokndalvestre-slidreamhostersolarssonvestre-totennishiaw" + + "akuravestvagoyvevelstadvibo-valentiavibovalentiavideovillaskoyab" + + "earalvahkihokumakogengerdalipayufuchukotkafjordvinnicarriervinny" + + "tsiavipsinaappiagetmyiphoenixn--3oq18vl8pn36avirginiavirtualvirt" + + 
"ueeldomeindustriesteambulancevirtuelvisakatakkoelnvistaprinterna" + + "tionalfirearmsologneviterboltrysiljan-mayenvivoldavladikavkazanv" + + "ladimirvladivostokaizukarasuyamazoevlogoipictetrentinosudtirolvo" + + "lkenkunderseaportulansnoasaitamatsukuris-leetrentino-sudtirolvol" + + "kswagentsolundbeckosaigawavologdanskoshunantokigawavolvolgogradv" + + "olyngdalvoronezhytomyrvossevangenvotevotingvotoyonezawavrnworse-" + + "thangglidingwowiwatsukiyonowtversaillesokanoyakagewritesthisblog" + + "sytewroclawloclawekostromahachijorpelandwtcirclegnicagliaridagaw" + + "alterwtfbx-oslodingenwuozuwwworldwzmiuwajimaxn--4gq48lf9jeonname" + + "rikawauexn--4it168dxn--4it797kotohiradomainsurehabmerxn--4pvxsol" + + "utionsirdalxn--54b7fta0cciticatholicheltenham-radio-openair-traf" + + "fic-controlleyxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49civilavi" + + "ationisshingugexn--5rtq34kotouraxn--5su34j936bgsgxn--5tzm5gxn--6" + + "btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civilisationi" + + "yodogawaxn--80adxhksomaxn--80ao21axn--80aqecdr1axn--80asehdbarcl" + + "aycardstvedestrandiskstationatuurwetenschappenaumburgladelmenhor" + + "stalbans3-us-west-1xn--80aswgxn--80audnedalnxn--8ltr62kouhokutam" + + "akizunokunimilanoxn--8pvr4uxn--8y0a063axn--90a3academyactivedire" + + "ctoryazannakadomari-elasticbeanstalkounosunndalxn--90aishobaraom" + + "origuchiharagusabaerobaticketsaritsynologyeongnamegawakeisenbahn" + + "xn--90azhair-surveillancexn--9dbhblg6dietcimmobilienxn--9dbq2axn" + + "--9et52uxn--9krt00axn--andy-iraxn--aroport-byanagawaxn--asky-ira" + + "xn--aurskog-hland-jnbarclays3-us-west-2xn--avery-yuasakegawaxn--" + + "b-5gaxn--b4w605ferdxn--bck1b9a5dre4civilizationrwiiheyaizuwakama" + + "tsubushikusakadogawaxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg" + + "-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuura" + + "xn--bievt-0qa2xn--bjarky-fyanaizuxn--bjddar-ptaobaokinawashirosa" + + "tobishimaintenancexn--blt-elaborxn--bmlo-graingerxn--bod-2naroyx" + + "n--brnny-wuaccident-investigationjukudoyamagadancebetsukubabia-g" + + "oracleaningatlantabusebastopologyeonggiehtavuoatnadexeterimo-i-r" + + "anagahamaroygardendoftheinternetflixilovecollegefantasyleaguerns" + + "eyxn--brnnysund-m8accident-preventionlineat-urlxn--brum-voagatun" + + "esnzxn--btsfjord-9zaxn--c1avgxn--c2br7gxn--c3s14misasaguris-gone" + + "xn--cck2b3barefootballangenoamishirasatochigiftsakuragawaustevol" + + "lavangenativeamericanantiques3-eu-central-1xn--cg4bkis-very-bada" + + "ddjamalborkangerxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-sslattumisawa" + + "xn--comunicaes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr6" + + "94bargainstitutelekommunikationaustdalindasiaustinnaturalhistory" + + "museumcentereportarnobrzegyptianaturalsciencesnaturelles3-eu-wes" + + "t-1xn--czrs0tunkoshimizumakiyosumydissentrentinoa-adigexn--czru2" + + "dxn--czrw28barreauctionaval-d-aosta-valleyonagoyaustraliaisondri" + + "odejaneirochestereviewskrakowebhoppdalaziobihirosakikamijimattel" + + "edatabaseballooningjesdalillyokosukareliancebinorilskariyakumold" + + "evennodessagaeroclubmedecincinnationwidealerhcloudcontrolledds3-" + + "external-1xn--d1acj3barrel-of-knowledgeologyonaguniversityoriika" + + "shibatakashimarylhurstjordalshalsenavigationavuotnakayamatsuzaki" + + "bigawaustrheimatunduhrennesoyokotebizenakamuratakahamaniwakurate" + + "xasdaburyatiaarpagefrontappagespeedmobilizerobiraetnagaivuotnaga" + + "okakyotambabydgoszczecinemailavagiske164xn--d1alfaromeoxn--d1atu" + + "rystykarasjohkamiokaminokawanishiaizubangexn--d5qv7z876civilwarm" + + 
"anagementkmaxxn--11b4c3dyroyrvikinguitarsassaris-a-democratmpana" + + "sonichelyabinskodjeffersonishiokoppegardyndns-weberlincolnishito" + + "sashimizunaminamiashigaraxn--davvenjrga-y4axn--djrs72d6uyxn--djt" + + "y4kouyamashikis-an-actorxn--dnna-grajewolterskluwerxn--drbak-wua" + + "xn--dyry-iraxn--e1a4claimsatxn--1ck2e1balsanagochihayaakasakawah" + + "araumalopolskanlandiscoveryokamikawanehonbetsurutaharaugustowada" + + "egubs3-ap-southeast-2xn--eckvdtc9dxn--efvn9somnarashinoxn--efvy8" + + "8hakatanotogawaxn--ehqz56nxn--elqq16hakodatexn--estv75gxn--eveni" + + "-0qa01gaxn--f6qx53axn--fct429kouzushimashikokuchuoxn--fhbeiarnxn" + + "--finny-yuaxn--fiq228c5hsooxn--fiq64barrell-of-knowledgeometre-e" + + "xperts-comptablesakuraibmditchyouripalaceu-1xn--fiqs8sopotromsak" + + "akinokiaxn--fiqz9sor-odalxn--fjord-lraxn--fjq720axn--fl-ziaxn--f" + + "lor-jraxn--flw351exn--fpcrj9c3dxn--frde-grandrapidsor-varangerxn" + + "--frna-woaraisaijosoyrovigorlicexn--frya-hraxn--fzc2c9e2clickddi" + + "elddanuorrikuzentakatajirissagamiharaxn--fzys8d69uvgmailxn--g2xx" + + "48clinichernigovernmentjxn--0trq7p7nnishiwakis-a-cubicle-slavell" + + "inowruzhgorodoyxn--gckr3f0fbxostrolekaluganskharkovallee-aostero" + + "yxn--gecrj9cliniquenoharaxn--ggaviika-8ya47hakonexn--gildeskl-g0" + + "axn--givuotna-8yandexn--3pxu8kosugexn--gjvik-wuaxn--gk3at1exn--g" + + "ls-elacaixaxn--gmq050is-very-evillagexn--gmqw5axn--h-2failxn--h1" + + "aeghakubankmpspacekitagatakasugais-a-nascarfanxn--h2brj9clintono" + + "shoesaudaxn--hbmer-xqaxn--hcesuolo-7ya35bashkiriauthordalandroid" + + "gcanonoichinomiyakehimejibestadigitalimanowarudagroks-thisamitsu" + + "kembuchikumagayagawakkanaibetsubamericanfamilydscloudappspotager" + + "epairbusantiquest-a-la-maisondre-landebusinessebyklefrakkestaddn" + + "skingjerdrumckinseyekaterinburgjerstadotsuruokamchatkameokameyam" + + "ashinatsukigatakamatsukawabogadocscbggfareastcoastaldefence-burg" + + "jemnes3-ap-northeast-1xn--hery-iraxn--hgebostad-g3axn--hmmrfeast" + + "a-s4acctuscanyxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmi" + + "r-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn" + + "--imr513nxn--indery-fyaotsurgutsiracusaitoshimaxn--io0a7is-very-" + + "goodhandsonxn--j1aefermobilyxn--j1amhakuis-a-nurservebbshellaspe" + + "ziaxn--j6w193gxn--jlq61u9w7basilicataniautomotivecodynaliascoli-" + + "picenoipirangamvikarlsoyokozemersongdalenviknakaniikawatanaguram" + + "usementargets-itargi234xn--jlster-byaroslavlaanderenxn--jrpeland" + + "-54axn--jvr189misconfusedxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--k" + + "crx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--klt" + + "x9axn--klty5xn--42c2d9axn--koluokta-7ya57hakusandiegoodyearthaga" + + "khanamigawaxn--kprw13dxn--kpry57dxn--kpu716ferraraxn--kput3is-ve" + + "ry-nicexn--krager-gyasakaiminatoyonoxn--kranghke-b0axn--krdshera" + + "d-m8axn--krehamn-dxaxn--krjohka-hwab49jetztrentino-suedtirolxn--" + + "ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasugis-very-sweetpepperxn--" + + "kvnangen-k0axn--l-1fairwindsorfoldxn--l1accentureklamborghiniiza" + + "xn--laheadju-7yasuokaratexn--langevg-jxaxn--lcvr32dxn--ldingen-q" + + "1axn--leagaviika-52basketballfinanzgorautoscanadaejeonbukarmoyom" + + "itanobninskarpaczeladz-1xn--lesund-huaxn--lgbbat1ad8jevnakershus" + + "cultureggiocalabriaxn--lgrd-poacoachampionshiphoptobamagazinebra" + + "skaunjargallupinbatochiokinoshimalselvendrellindesnesakyotanabel" + + "lunordlandivtasvuodnaharimamurogawawegroweibolzanordreisa-geekas" + + "hiharaveroykenglandiscountysvardolls3-external-2xn--lhppi-xqaxn-" + + 
"-linds-pramericanartushuissier-justicexn--lns-qlanxessorreisahay" + + "akawakamiichikawamisatottoris-lostre-toteneis-a-teacherkassymant" + + "echnologyxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liaclo" + + "thingujolsterxn--lten-granexn--lury-iraxn--mely-iraxn--merker-ku" + + "axn--mgb2ddesortlandxn--mgb9awbferrarittogoldpoint2thisayamanash" + + "iibadajozorahkkeravjudygarlandxn--mgba3a3ejtuvalle-daostavangerx" + + "n--mgba3a4f16axn--mgba3a4franamizuholdingsmileksvikozagawaxn--mg" + + "ba7c0bbn0axn--mgbaakc7dvferreroticapebretonamiasakuchinotsuchiur" + + "akawarszawashingtondclkhersonxn--mgbaam7a8haldenxn--mgbab2bdxn--" + + "mgbai9a5eva00batsfjordivttasvuotnakaiwamizawavocatanzaroweddingj" + + "ovikaruizawasnesoddenmarkets3-ap-northeast-2xn--mgbai9azgqp6jewe" + + "lryxn--mgbayh7gpaduaxn--mgbb9fbpobanazawaxn--mgbbh1a71exn--mgbc0" + + "a9azcgxn--mgbca7dzdoxn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbi" + + "4ecexposedxn--mgbpl2fhskozakis-an-actresshintokushimaxn--mgbqly7" + + "c0a67fbcloudfrontdoorxn--mgbqly7cvafredrikstadtvsorumisakis-foun" + + "dationxn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bauhausposts-and-" + + "telecommunicationsupdatelemarkashiwaravoues3-fips-us-gov-west-1x" + + "n--mgbx4cd0abbottuxfamilyxn--mix082fetsundxn--mix891fgunmarnarda" + + "lxn--mjndalen-64axn--mk0axinfinitis-with-thebandoomdnsaliascolip" + + "icenord-aurdalceshiojirishirifujiedaxn--mk1bu44cloudfunctionsauh" + + "eradxn--mkru45isleofmandalxn--mlatvuopmi-s4axn--mli-tlapyatigors" + + "kpnxn--mlselv-iuaxn--moreke-juaxn--mori-qsakuhokkaidontexisteing" + + "eekppspbananarepublicartierxn--mosjen-eyatominamiawajikissmarter" + + "thanyouslivinghistoryxn--mot-tlaquilancasterxn--mre-og-romsdal-q" + + "qbbcartoonartdecoffeedbackashiwazakiyokawaraxastronomycdn77-secu" + + "rebungoonord-frontierepbodyndns-freebox-oskolegokasells-for-less" + + "3-ap-southeast-1xn--msy-ula0halsaintlouis-a-anarchistoireggio-em" + + "ilia-romagnakanotoddenxn--mtta-vrjjat-k7afamilycompanycntoyookan" + + "zakiwienxn--muost-0qaxn--mxtq1mishimatsumotofukexn--ngbc5azdxn--" + + "ngbe9e0axn--ngbrxn--45brj9circus-2xn--nit225krasnodarxn--nmesjev" + + "uemie-tcbajddarchaeologyxn--nnx388axn--nodexn--nqv7fs00emaxn--nr" + + "y-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservecounterstrik" + + "exn--nvuotna-hwaxn--nyqy26axn--o1achattanooganorfolkebiblegalloc" + + "us-1xn--o3cw4hammarfeastafricamagichofunatorientexpressaseboknow" + + "sitalluxembourgrpanamaxn--od0algxn--od0aq3bbtatamotorsalangenayo" + + "roceanographicsalondonetskasukabedzin-the-bandaioiraseeklogesura" + + "nceoceanographiqueu-2xn--ogbpf8flekkefjordxn--oppegrd-ixaxn--ost" + + "ery-fyatsukaratsuginamikatagamihoboleslawiecolonialwilliamsburgu" + + "lenxn--osyro-wuaxn--p1acfhvalerxn--p1aiwchoshibuyachiyodavvesiid" + + "azaifuefukihaborokunohealth-carereformitakeharaxn--pbt977colorad" + + "oplateaudioxn--pgbs0dhlxn--porsgu-sta26fidonnakamagayachtscrappe" + + "r-sitexn--pssu33lxn--pssy2uxn--q9jyb4columbusheyxn--qcka1pmcdona" + + "ldsouthcarolinazawaxn--qqqt11missilelxn--qxamurskiptveterinairea" + + "ltorlandxn--rady-iraxn--rdal-poaxn--rde-ularvikrasnoyarskomforba" + + "mblebtimnetz-2xn--rdy-0nabarixn--rennesy-v1axn--rhkkervju-01afla" + + "kstadaokagakibichuoxn--rholt-mragowoodsidexn--rhqv96gxn--rht27zx" + + "n--rht3dxn--rht61exn--risa-5narusawaxn--risr-iraxn--rland-uuaxn-" + + "-rlingen-mxaxn--rmskog-byatsushiroxn--rny31hamurakamigoriginshim" + + "okawaxn--rovu88bbvacationswatch-and-clockerxn--rros-granvindafjo" + + "rdxn--rskog-uuaxn--rst-0narutokyotangotpantheonsitextileitungsen" + + 
"xn--rsta-francaiseharaxn--ryken-vuaxn--ryrvik-byawaraxn--s-1fait" + + "heguardianxn--s9brj9communitysnesavannahgaxn--sandnessjen-ogbizh" + + "evskredirectmeldalxn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-" + + "gratangenxn--skierv-utazaskvolloabathsbcomobaraxn--skjervy-v1axn" + + "--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5narviikananporov" + + "noxn--slt-elabourxn--smla-hraxn--smna-gratis-a-bulls-fanxn--snas" + + "e-nraxn--sndre-land-0cbremangerxn--snes-poaxn--snsa-roaxn--sr-au" + + "rdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbentleyu" + + "kuhashimojiinetatarstanflfanfshostrodawaraxn--srfold-byawatahama" + + "xn--srreisa-q1axn--srum-grazxn--stfold-9xaxn--stjrdal-s1axn--stj" + + "rdalshalsen-sqbeppubolognagasukeverbankasumigaurawa-mazowszexbox" + + "enapponazure-mobilevje-og-hornnesaltdalinkasuyakutiaxn--stre-tot" + + "en-zcbsouthwestfalenxn--t60b56axn--tckweatherchannelxn--tiq49xqy" + + "jewishartgalleryxn--tjme-hraxn--tn0agrinet-freaksowaxn--tnsberg-" + + "q1axn--tor131oxn--trany-yuaxn--trgstad-r1axn--trna-woaxn--troms-" + + "zuaxn--tysvr-vraxn--uc0atversicherungxn--uc0ay4axn--uist22hangou" + + "tsystemscloudcontrolappasadenaklodzkodairaxn--uisz3gxn--unjrga-r" + + "tarantourspjelkavikosakaerodromegalsacechirealminamiuonumasudaxn" + + "--unup4yxn--uuwu58axn--vads-jraxn--vard-jraxn--vegrshei-c0axn--v" + + "ermgensberater-ctberndiyurihonjournalistjohnhlfanhsalvadordaliba" + + "baikaliszczytnorddalinzaiitatebayashijonawatexn--vermgensberatun" + + "g-pwbeskidynathomedepotenzachpomorskienikiiyamanobeauxartsandcra" + + "ftsalzburglassassinationalheritagematsubarakawagoexn--vestvgy-ix" + + "a6oxn--vg-yiabbvieeexn--vgan-qoaxn--vgsy-qoa0jfkomakiyosatokashi" + + "kiyosemitexn--vgu402comparemarkerryhotelsaves-the-whalessandria-" + + "trani-barletta-andriatranibarlettaandriaxn--vhquvestfoldxn--vler" + + "-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bestbu" + + "yshousesamegawaxn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgb" + + "h1compute-1xn--wgbl6axn--xhq521betainaboxfusejnynysafetysfjordnp" + + "alanakhodkanagawaxn--xkc2al3hye2axn--xkc2dl3a5ee0hannanmokuizumo" + + "dernxn--y9a3aquariumisugitokorozawaxn--yer-znarvikristiansandcat" + + "shirahamatonbetsurgeryxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn" + + "--45q11citadeliveryggeelvinckchristiansburgruexn--ystre-slidre-u" + + "jbieidsvollipetskaszubyusuharaxn--zbx025dxn--zf0ao64axn--zf0avxn" + + "--4gbriminingxn--zfr164bielawallonieruchomoscienceandindustrynik" + + "koebenhavnikolaeventsamnangerxperiaxz" + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. 
+// +// The layout within the uint32, from MSB to LSB, is: +// [ 1 bits] unused +// [ 9 bits] children index +// [ 1 bits] ICANN bit +// [15 bits] text index +// [ 6 bits] text length +var nodes = [...]uint32{ + 0x27a003, + 0x328304, + 0x272406, + 0x36e2c3, + 0x36e2c6, + 0x3a6306, + 0x260483, + 0x206e44, + 0x345647, + 0x272048, + 0x1a00882, + 0x30abc7, + 0x355a09, + 0x2eb6ca, + 0x2eb6cb, + 0x22f803, + 0x28f606, + 0x232a05, + 0x1e00702, + 0x215f44, + 0x236483, + 0x278b45, + 0x2208ac2, + 0x330fc3, + 0x26cf584, + 0x328c05, + 0x2a014c2, + 0x378a0e, + 0x24d0c3, + 0x37e606, + 0x37e60b, + 0x2e01c42, + 0x350f47, + 0x235446, + 0x3200a42, + 0x258943, + 0x258944, + 0x343086, + 0x23c1c8, + 0x287c86, + 0x2717c4, + 0x3600ec2, + 0x329b89, + 0x3a2ec7, + 0x2f7646, + 0x357689, + 0x295f08, + 0x2af504, + 0x3a0846, + 0x216b86, + 0x3a02a82, + 0x25af4f, + 0x34280e, + 0x211dc4, + 0x2bc7c5, + 0x2e4bc5, + 0x2ec7c9, + 0x23ecc9, + 0x340e47, + 0x212fc6, + 0x212f03, + 0x3e04a42, + 0x270703, + 0x22098a, + 0x20b0c3, + 0x2607c5, + 0x287302, + 0x287309, + 0x4201e02, + 0x208184, + 0x206986, + 0x237bc5, + 0x34ea44, + 0x4a86a04, + 0x201e03, + 0x231a44, + 0x4e02902, + 0x328044, + 0x31ea44, + 0x22350a, + 0x52009c2, + 0x2d2687, + 0x238088, + 0x5a08f02, + 0x321407, + 0x2b7744, + 0x2b7747, + 0x385185, + 0x36ccc7, + 0x340c06, + 0x21cec4, + 0x357985, + 0x299d07, + 0x6a01cc2, + 0x2af103, + 0x213402, + 0x375ac3, + 0x6e136c2, + 0x283905, + 0x7204a02, + 0x329244, + 0x27eec5, + 0x211d07, + 0x3731ce, + 0x2e3e04, + 0x245c04, + 0x208143, + 0x2ce4c9, + 0x307ecb, + 0x30f508, + 0x31a288, + 0x31e088, + 0x323148, + 0x3574ca, + 0x36cbc7, + 0x2272c6, + 0x769ed02, + 0x375043, + 0x37fa83, + 0x38d3c4, + 0x260d03, + 0x2604c3, + 0x1711602, + 0x7a070c2, + 0x24a0c5, + 0x28ecc6, + 0x2c9e84, + 0x396e07, + 0x32cc06, + 0x341644, + 0x3a9e87, + 0x2070c3, + 0x7ebefc2, + 0x8305b42, + 0x8619ac2, + 0x219ac6, + 0x8a00002, + 0x37dd45, + 0x312b43, + 0x204384, + 0x2db0c4, + 0x2db0c5, + 0x2075c3, + 0x8f27883, + 0x920a882, + 0x28a905, + 0x28a90b, + 0x22be86, + 0x20cd4b, + 0x276844, + 0x20d309, + 0x20f104, + 0x960f602, + 0x210943, + 0x2125c3, + 0x1612742, + 0x245dc3, + 0x21274a, + 0x9a12bc2, + 0x2161c5, + 0x290fca, + 0x2cd084, + 0x213a83, + 0x213f44, + 0x2159c3, + 0x2159c4, + 0x2159c7, + 0x216f85, + 0x217785, + 0x218e86, + 0x219d86, + 0x21a783, + 0x21e908, + 0x258283, + 0x9e03482, + 0x21f388, + 0x21474b, + 0x222208, + 0x222986, + 0x223907, + 0x227e88, + 0xa63a242, + 0xaa715c2, + 0x2e3688, + 0x29f847, + 0x242c85, + 0x242c88, + 0x343988, + 0x383b83, + 0x22a7c4, + 0x38d402, + 0xae2cb02, + 0xb2519c2, + 0xba2ce42, + 0x22ce43, + 0xbe01482, + 0x206e03, + 0x201484, + 0x21a903, + 0x2af4c4, + 0x25fc4b, + 0x214683, + 0x2d3946, + 0x223384, + 0x29e18e, + 0x341045, + 0x265808, + 0x2246c7, + 0x2246ca, + 0x22fd03, + 0x275647, + 0x308085, + 0x22fd04, + 0x22fd06, + 0x22fd07, + 0x2c62c4, + 0x373507, + 0x2028c4, + 0x2093c4, + 0x2093c6, + 0x2dc104, + 0x221c46, + 0x2138c3, + 0x226b48, + 0x303708, + 0x245bc3, + 0x245d83, + 0x395544, + 0x39b103, + 0xc200482, + 0xc707ac2, + 0x2004c3, + 0x208406, + 0x381043, + 0x228584, + 0xca19942, + 0x2d7f03, + 0x219943, + 0x21b682, + 0xce008c2, + 0x2bb486, + 0x233907, + 0x2e9005, + 0x344c04, + 0x2a1c45, + 0x2021c7, + 0x26e685, + 0x2aff89, + 0x2c75c6, + 0x2d0108, + 0x2e8f06, + 0xd2092c2, + 0x23bd88, + 0x300a86, + 0x20dc05, + 0x3af1c7, + 0x303604, + 0x303605, + 0x287e44, + 0x287e48, + 0xd60a1c2, + 0xda036c2, + 0x32f506, + 0x3160c8, + 0x338e05, + 0x33a086, + 0x33c2c8, + 0x35ff48, + 0xdec8b85, + 0x2036c4, + 0x324407, + 0xe20d9c2, + 0xe61eb82, + 0xfa06a82, + 
0x3597c5, + 0x2a22c5, + 0x3753c6, + 0x317a47, + 0x22aac7, + 0x1022bf83, + 0x2a5b07, + 0x2d4808, + 0x390509, + 0x378bc7, + 0x3a7507, + 0x22e108, + 0x22e906, + 0x22f846, + 0x23020c, + 0x230d8a, + 0x231247, + 0x2328cb, + 0x233747, + 0x23374e, + 0x234744, + 0x234a44, + 0x238e47, + 0x25a647, + 0x23d186, + 0x23d187, + 0x23dd87, + 0x13208942, + 0x23f586, + 0x23f58a, + 0x23f80b, + 0x240bc7, + 0x241585, + 0x2418c3, + 0x241dc6, + 0x241dc7, + 0x23ee83, + 0x1362ea42, + 0x24268a, + 0x13b56b42, + 0x13ea4c42, + 0x14244142, + 0x14635542, + 0x244ec5, + 0x2459c4, + 0x14e00682, + 0x3280c5, + 0x278b03, + 0x315b45, + 0x2124c4, + 0x293906, + 0x202bc6, + 0x28ab03, + 0x3654c4, + 0x324ec3, + 0x15201582, + 0x208d04, + 0x324986, + 0x208d05, + 0x258006, + 0x3af2c8, + 0x21ecc4, + 0x236248, + 0x2e01c5, + 0x32bcc8, + 0x2dce46, + 0x2b4247, + 0x22f244, + 0x22f246, + 0x323fc3, + 0x385503, + 0x2bfd08, + 0x30d944, + 0x341787, + 0x248cc6, + 0x30af09, + 0x35c488, + 0x330488, + 0x24f8c4, + 0x3a2543, + 0x206c82, + 0x1560b042, + 0x15a05382, + 0x3abf43, + 0x15e12c42, + 0x345784, + 0x2af205, + 0x29d8c3, + 0x2306c4, + 0x302947, + 0x344943, + 0x2465c8, + 0x205f85, + 0x308144, + 0x36bb43, + 0x27ee45, + 0x27ef84, + 0x2090c6, + 0x20c244, + 0x20d086, + 0x211c46, + 0x261504, + 0x2199c3, + 0x162b1a02, + 0x350e05, + 0x223cc3, + 0x16600442, + 0x2bbf85, + 0x231b03, + 0x231b09, + 0x16a04142, + 0x172110c2, + 0x329605, + 0x21cd46, + 0x34c707, + 0x2c9a46, + 0x2b9908, + 0x2b990b, + 0x20844b, + 0x2e9205, + 0x2d07c5, + 0x2c0a89, + 0x1600bc2, + 0x2616c8, + 0x20cf84, + 0x17a00202, + 0x25f883, + 0x1825a806, + 0x380ec8, + 0x18602e42, + 0x226088, + 0x18a08b02, + 0x27698a, + 0x228bc3, + 0x3b23c6, + 0x3997c8, + 0x204188, + 0x334fc6, + 0x36a0c7, + 0x25b147, + 0x21670a, + 0x2cd104, + 0x33ed84, + 0x3554c9, + 0x38ff05, + 0x342a06, + 0x20b203, + 0x249604, + 0x2143c4, + 0x24fd07, + 0x22d547, + 0x26b1c4, + 0x216645, + 0x375488, + 0x3617c7, + 0x364607, + 0x18e09342, + 0x2e3cc4, + 0x2946c8, + 0x3859c4, + 0x246a04, + 0x246e05, + 0x246f47, + 0x210c49, + 0x247d04, + 0x248a09, + 0x248fc8, + 0x249384, + 0x249387, + 0x249b83, + 0x24a707, + 0x1649242, + 0x17a6b82, + 0x24b646, + 0x24c287, + 0x24c884, + 0x24d607, + 0x24e647, + 0x24ed88, + 0x24f503, + 0x23d6c2, + 0x202442, + 0x251003, + 0x251004, + 0x25100b, + 0x31a388, + 0x257f44, + 0x251d05, + 0x253c87, + 0x2569c5, + 0x36bf0a, + 0x257e83, + 0x1920db02, + 0x258184, + 0x25a409, + 0x25f283, + 0x25f347, + 0x36b309, + 0x376348, + 0x208a03, + 0x27dd47, + 0x27e489, + 0x2840c3, + 0x285e04, + 0x286bc9, + 0x289286, + 0x28a343, + 0x201c82, + 0x244d43, + 0x39c247, + 0x37de85, + 0x35a206, + 0x24a304, + 0x2e6285, + 0x220943, + 0x21a9c6, + 0x20d502, + 0x390ec4, + 0x225902, + 0x2daa43, + 0x196007c2, + 0x247643, + 0x21a204, + 0x21a207, + 0x204686, + 0x24cdc2, + 0x19a53a02, + 0x3af4c4, + 0x19e39ec2, + 0x1a202842, + 0x31aac4, + 0x31aac5, + 0x28de45, + 0x2c2a86, + 0x1a603ac2, + 0x308d05, + 0x3a5245, + 0x29a0c3, + 0x204e86, + 0x212b05, + 0x219a42, + 0x339cc5, + 0x219a44, + 0x21ec03, + 0x21ee43, + 0x1aa0be82, + 0x2f1f87, + 0x361a44, + 0x361a49, + 0x249504, + 0x23a103, + 0x34a009, + 0x350cc8, + 0x2a2144, + 0x2a2146, + 0x2a4543, + 0x214dc3, + 0x22a0c4, + 0x250cc3, + 0x1aee0682, + 0x301c42, + 0x1b210702, + 0x314a48, + 0x3801c8, + 0x394986, + 0x245545, + 0x229b85, + 0x210705, + 0x224242, + 0x1b6931c2, + 0x1633602, + 0x390088, + 0x23bcc5, + 0x3052c4, + 0x2e0105, + 0x32b887, + 0x257c84, + 0x23d4c2, + 0x1ba03e82, + 0x30d204, + 0x2140c7, + 0x39ee87, + 0x36cc84, + 0x290f83, + 0x245b04, + 0x245b08, + 0x22fb86, + 0x22fb8a, + 0x210b04, + 0x291308, + 
0x24f004, + 0x223a06, + 0x293184, + 0x359ac6, + 0x341dc9, + 0x266587, + 0x235903, + 0x1be10442, + 0x26ecc3, + 0x20f802, + 0x1c217e82, + 0x2df3c6, + 0x363888, + 0x2a3687, + 0x3a4389, + 0x23a009, + 0x2a3f45, + 0x2a50c9, + 0x2a5f45, + 0x2a6a09, + 0x2a8105, + 0x288484, + 0x288487, + 0x2998c3, + 0x2a8e07, + 0x3a78c6, + 0x2a9607, + 0x2a0f05, + 0x2ab543, + 0x1c630842, + 0x392904, + 0x1ca29982, + 0x25a043, + 0x1ce134c2, + 0x2e6cc6, + 0x238005, + 0x2acc47, + 0x335583, + 0x260c84, + 0x203bc3, + 0x2e33c3, + 0x1d20b542, + 0x1da00042, + 0x3a6404, + 0x23d683, + 0x397285, + 0x2aafc5, + 0x1de04982, + 0x1e600942, + 0x27e086, + 0x20ad44, + 0x30da84, + 0x30da8a, + 0x1ee02002, + 0x2f920a, + 0x36f688, + 0x1f2023c4, + 0x215ac3, + 0x24bc43, + 0x31e1c9, + 0x22dec9, + 0x302a46, + 0x1f602243, + 0x2d9885, + 0x2f9e4d, + 0x207286, + 0x21134b, + 0x1fa016c2, + 0x34c188, + 0x1fe1ea02, + 0x20208282, + 0x370545, + 0x20603fc2, + 0x269947, + 0x2a6507, + 0x21db43, + 0x258c48, + 0x20a07382, + 0x282f04, + 0x212d83, + 0x34bb85, + 0x383983, + 0x237ac6, + 0x2eae04, + 0x245d43, + 0x26f203, + 0x20e0abc2, + 0x2e9184, + 0x353985, + 0x367f07, + 0x27bbc3, + 0x2ad443, + 0x2adc43, + 0x1622602, + 0x2add03, + 0x2adf83, + 0x21202dc2, + 0x2ce884, + 0x27f1c6, + 0x20fa03, + 0x2ae303, + 0x216afcc2, + 0x2afcc8, + 0x2b0784, + 0x2402c6, + 0x2b0bc7, + 0x218fc6, + 0x338f04, + 0x2f2001c2, + 0x3a778b, + 0x2f29ce, + 0x21d4cf, + 0x234343, + 0x2fa44d02, + 0x1605482, + 0x2fe04b42, + 0x227dc3, + 0x233343, + 0x238c46, + 0x2f0ac6, + 0x2e6587, + 0x379084, + 0x3028da82, + 0x306062c2, + 0x2f9b45, + 0x2ee007, + 0x2f15c6, + 0x30a6cf82, + 0x26cf84, + 0x372003, + 0x30e0ac82, + 0x352e03, + 0x3910c4, + 0x2b6949, + 0x16bd702, + 0x31235c82, + 0x2dac86, + 0x26b485, + 0x31645cc2, + 0x31a00102, + 0x33e107, + 0x2033c9, + 0x355c8b, + 0x25af05, + 0x3748c9, + 0x2be006, + 0x22bec7, + 0x2060c4, + 0x2cf089, + 0x35dbc7, + 0x2b7ec7, + 0x20ae83, + 0x20ae86, + 0x2dd5c7, + 0x2387c3, + 0x27cf86, + 0x31e049c2, + 0x32231d82, + 0x21bfc3, + 0x260885, + 0x221ac7, + 0x343c86, + 0x37de05, + 0x376b04, + 0x2de1c5, + 0x2e9b84, + 0x32600f02, + 0x321d87, + 0x2e2904, + 0x22ddc4, + 0x22ddcd, + 0x24c649, + 0x2e04c8, + 0x22bb04, + 0x323605, + 0x2633c7, + 0x2ceb44, + 0x32ccc7, + 0x35ab05, + 0x32b9a984, + 0x2ce185, + 0x25df44, + 0x374206, + 0x317845, + 0x32e34802, + 0x213b44, + 0x213b45, + 0x38d946, + 0x37df45, + 0x2548c4, + 0x2e7043, + 0x380406, + 0x20efc5, + 0x210e45, + 0x317944, + 0x210b83, + 0x210b8c, + 0x33289bc2, + 0x33605602, + 0x33a17382, + 0x39a883, + 0x39a884, + 0x33e03702, + 0x2fd288, + 0x35a2c5, + 0x37f144, + 0x29fe46, + 0x34233a82, + 0x3461ca02, + 0x34a00982, + 0x2b5dc5, + 0x2613c6, + 0x24fc44, + 0x3435c6, + 0x2d2446, + 0x207a03, + 0x34f2afca, + 0x23d9c5, + 0x2f3506, + 0x2f3509, + 0x367547, + 0x291748, + 0x295dc9, + 0x218388, + 0x322e86, + 0x23db83, + 0x35206a42, + 0x386e83, + 0x386e89, + 0x3442c8, + 0x3560ad82, + 0x35a0f842, + 0x232003, + 0x2cff85, + 0x251804, + 0x2c1a09, + 0x2aa9c4, + 0x2b09c8, + 0x20f843, + 0x2600c4, + 0x329d03, + 0x22dd07, + 0x35e3c442, + 0x25a2c2, + 0x22af85, + 0x26d1c9, + 0x220ec3, + 0x27f804, + 0x2d9844, + 0x263443, + 0x28088a, + 0x3636f542, + 0x36613b02, + 0x2bef43, + 0x3721c3, + 0x1660082, + 0x260f43, + 0x36a50702, + 0x3891c4, + 0x36e02ac2, + 0x3730db04, + 0x34a586, + 0x27e2c4, + 0x2406c3, + 0x284a83, + 0x21c443, + 0x23fb86, + 0x2c4d05, + 0x2bf7c7, + 0x22bd89, + 0x2c37c5, + 0x2c4c46, + 0x2c5248, + 0x2c5446, + 0x256604, + 0x29920b, + 0x2c70c3, + 0x2c70c5, + 0x2c7208, + 0x21e782, + 0x33e402, + 0x37629382, + 0x37a03642, + 0x2632c3, + 0x37e08b82, + 0x26e443, + 
0x2c7504, + 0x2c83c3, + 0x38607682, + 0x2c9f8b, + 0x38acc586, + 0x2ef8c6, + 0x2ccbc8, + 0x38ecae42, + 0x39212602, + 0x3961ee82, + 0x39a0b602, + 0x39e02642, + 0x20264b, + 0x3a201842, + 0x2262c3, + 0x316c05, + 0x321ac6, + 0x3a611004, + 0x20b687, + 0x32478a, + 0x31ec86, + 0x2e9444, + 0x262ec3, + 0x3b20dbc2, + 0x202b42, + 0x2570c3, + 0x3b64c083, + 0x2635c7, + 0x317747, + 0x3ca51107, + 0x228b87, + 0x214983, + 0x2248ca, + 0x214984, + 0x248bc4, + 0x248bca, + 0x24f205, + 0x3ce02402, + 0x24e143, + 0x3d200dc2, + 0x20f543, + 0x26ec83, + 0x3da01742, + 0x2a5a84, + 0x220684, + 0x201745, + 0x2d8385, + 0x2368c6, + 0x236c46, + 0x3de09142, + 0x3e201042, + 0x3360c5, + 0x2ef5d2, + 0x24ca86, + 0x226a03, + 0x33b046, + 0x2ff645, + 0x160b282, + 0x4660d682, + 0x2efd43, + 0x310b83, + 0x2dbcc3, + 0x46a07902, + 0x378d03, + 0x46e12f42, + 0x2a3443, + 0x2ce8c8, + 0x222f43, + 0x222f46, + 0x313c87, + 0x210586, + 0x21058b, + 0x2e9387, + 0x392704, + 0x47602102, + 0x3a0745, + 0x202b03, + 0x22cd43, + 0x3188c3, + 0x3188c6, + 0x2d088a, + 0x273f43, + 0x235304, + 0x316006, + 0x20e006, + 0x47a04703, + 0x260b47, + 0x37ea8d, + 0x38cd87, + 0x298f45, + 0x246406, + 0x20f003, + 0x492050c3, + 0x49609242, + 0x3283c4, + 0x22d28c, + 0x32bf09, + 0x23a787, + 0x249885, + 0x268144, + 0x272748, + 0x279205, + 0x286a85, + 0x28d409, + 0x2f7703, + 0x2f7704, + 0x2a4bc4, + 0x49a00ac2, + 0x265883, + 0x49e92c42, + 0x2a1d46, + 0x160b142, + 0x4a299882, + 0x2b5cc8, + 0x2ce0c7, + 0x299885, + 0x2de9cb, + 0x2d1dc6, + 0x2debc6, + 0x2f8346, + 0x224cc4, + 0x2fba46, + 0x2d51c8, + 0x232243, + 0x247403, + 0x247404, + 0x2d6c84, + 0x2d7007, + 0x2d8185, + 0x4a6d82c2, + 0x4aa0a742, + 0x20a745, + 0x29cd44, + 0x2d9b8b, + 0x2dafc8, + 0x2db6c4, + 0x26cfc2, + 0x4b2aff02, + 0x2aff03, + 0x2dbb04, + 0x2dd185, + 0x22a547, + 0x2dfc44, + 0x2e9244, + 0x4b608582, + 0x35d1c9, + 0x2e0b05, + 0x25b1c5, + 0x2e1685, + 0x4ba1d603, + 0x2e24c4, + 0x2e24cb, + 0x2e4144, + 0x2e45cb, + 0x2e6745, + 0x21d60a, + 0x2e7108, + 0x2e730a, + 0x2e7583, + 0x2e758a, + 0x4be297c2, + 0x4c242242, + 0x263c83, + 0x4c6e8e82, + 0x2e8e83, + 0x4caea942, + 0x4cf132c2, + 0x2e9a04, + 0x21ea46, + 0x343305, + 0x2ea303, + 0x27a5c6, + 0x22b644, + 0x4d203942, + 0x2b6e84, + 0x2c070a, + 0x387f47, + 0x237e46, + 0x2d0d47, + 0x22d3c3, + 0x24f088, + 0x25ab8b, + 0x302b45, + 0x2b7285, + 0x2b7286, + 0x217c04, + 0x323908, + 0x203943, + 0x216a84, + 0x216a87, + 0x342fc6, + 0x31f2c6, + 0x29dfca, + 0x246104, + 0x24610a, + 0x322406, + 0x322407, + 0x251d87, + 0x276184, + 0x276189, + 0x266c05, + 0x239e4b, + 0x279443, + 0x20d243, + 0x229bc3, + 0x384904, + 0x4d6034c2, + 0x25b486, + 0x2ab2c5, + 0x2b2645, + 0x223e46, + 0x248684, + 0x4da00c02, + 0x223f44, + 0x4de0ed42, + 0x2307c4, + 0x225703, + 0x4e301102, + 0x308803, + 0x258606, + 0x4e602942, + 0x2d30c8, + 0x3abc84, + 0x3abc86, + 0x31abc6, + 0x253d44, + 0x380385, + 0x2035c8, + 0x204d07, + 0x20c307, + 0x20c30f, + 0x2945c6, + 0x220bc3, + 0x220bc4, + 0x228444, + 0x233083, + 0x223b44, + 0x22fe84, + 0x4ea2b382, + 0x28a843, + 0x23a203, + 0x4ee03682, + 0x253503, + 0x345843, + 0x21780a, + 0x29fa47, + 0x23b08c, + 0x23b346, + 0x23b886, + 0x23d307, + 0x22e547, + 0x241f49, + 0x21f4c4, + 0x242e44, + 0x4f24ecc2, + 0x4f604042, + 0x260944, + 0x375f06, + 0x22e9c8, + 0x380d04, + 0x269986, + 0x2c9a05, + 0x26ae48, + 0x208643, + 0x26df45, + 0x272903, + 0x25b2c3, + 0x25b2c4, + 0x2744c3, + 0x4fa50642, + 0x4fe01f82, + 0x279309, + 0x286985, + 0x288004, + 0x34adc5, + 0x213604, + 0x24be47, + 0x340005, + 0x2512c4, + 0x2512c8, + 0x2d5b06, + 0x2d9544, + 0x2de648, + 0x2e2747, + 0x50202742, + 0x2e6e04, + 0x2e63c4, + 
0x2b80c7, + 0x50679d44, + 0x236b42, + 0x50a03a02, + 0x24ddc3, + 0x2dab84, + 0x235c43, + 0x2754c5, + 0x50e4acc2, + 0x2ed405, + 0x20b5c2, + 0x373e45, + 0x363a45, + 0x512198c2, + 0x2198c4, + 0x51609642, + 0x236506, + 0x2abb46, + 0x26d308, + 0x2b90c8, + 0x2e6c44, + 0x2f5905, + 0x302f49, + 0x34c804, + 0x2d0844, + 0x261603, + 0x216845, + 0x2bd7c7, + 0x277b44, + 0x2ea5cd, + 0x2eab42, + 0x2eab43, + 0x2eac03, + 0x51a04582, + 0x38a045, + 0x22b107, + 0x228c44, + 0x228c47, + 0x295fc9, + 0x2c0849, + 0x20c987, + 0x279043, + 0x279048, + 0x21db89, + 0x2ebb87, + 0x2ebf05, + 0x2ec6c6, + 0x2ecd06, + 0x2ece85, + 0x24c745, + 0x51e00c42, + 0x226605, + 0x2bae0a, + 0x2a7a08, + 0x21c906, + 0x2e6987, + 0x26b104, + 0x3ae3c7, + 0x2f0186, + 0x52200242, + 0x38d646, + 0x2f374a, + 0x2f4745, + 0x526d3382, + 0x52a56142, + 0x2dd906, + 0x35ee88, + 0x39f047, + 0x52e00602, + 0x213d03, + 0x200a06, + 0x30b804, + 0x313b46, + 0x34d186, + 0x37fb0a, + 0x397385, + 0x20fa86, + 0x2133c3, + 0x2133c4, + 0x2083c2, + 0x300a43, + 0x53248c82, + 0x2c5603, + 0x2f9484, + 0x2dca84, + 0x35efca, + 0x2468c3, + 0x287d48, + 0x279dca, + 0x234cc7, + 0x2f5d86, + 0x2363c4, + 0x28fcc2, + 0x208bc2, + 0x5360a6c2, + 0x245ac3, + 0x251b47, + 0x27a887, + 0x38ffcb, + 0x328284, + 0x30c5c7, + 0x22a646, + 0x219bc7, + 0x29f984, + 0x2c7d05, + 0x291bc5, + 0x53a1bf02, + 0x2225c6, + 0x33a583, + 0x2be6c2, + 0x32b206, + 0x53e0fe42, + 0x542012c2, + 0x2012c5, + 0x5461c642, + 0x54a05702, + 0x2e7dc5, + 0x38e705, + 0x20fb45, + 0x26c743, + 0x239ac5, + 0x2d1e87, + 0x2a94c5, + 0x3a0145, + 0x265904, + 0x243586, + 0x24aac4, + 0x54e05a02, + 0x27dbc5, + 0x2a2c87, + 0x2299c8, + 0x26ed46, + 0x26ed4d, + 0x26f609, + 0x26f612, + 0x2ee645, + 0x2f2703, + 0x55a0f042, + 0x2e7b84, + 0x207303, + 0x318f45, + 0x35d4c5, + 0x55e12dc2, + 0x36bb83, + 0x56244302, + 0x566cd782, + 0x56a13082, + 0x33f185, + 0x331083, + 0x264048, + 0x56e07e02, + 0x57201bc2, + 0x2a5a46, + 0x325c0a, + 0x20d803, + 0x239103, + 0x2edd83, + 0x57e03f02, + 0x66207942, + 0x66a0a302, + 0x201242, + 0x38d449, + 0x2bcb44, + 0x258f48, + 0x66eea342, + 0x67204102, + 0x2e4805, + 0x232d08, + 0x24a508, + 0x39e64c, + 0x239cc3, + 0x23bc82, + 0x6760c402, + 0x2c3c46, + 0x2f6c05, + 0x326903, + 0x32b746, + 0x2f6d46, + 0x235d83, + 0x2f8103, + 0x2f8b46, + 0x2f9904, + 0x276a86, + 0x2c7285, + 0x2f9c8a, + 0x233f84, + 0x2faac4, + 0x34da8a, + 0x67a74b82, + 0x271185, + 0x2fcf4a, + 0x2fdcc5, + 0x2fe844, + 0x2fe946, + 0x2feac4, + 0x228946, + 0x67e00282, + 0x237786, + 0x238845, + 0x201d07, + 0x300fc6, + 0x23d504, + 0x2c8047, + 0x32af06, + 0x267e05, + 0x267e07, + 0x39b987, + 0x39b98e, + 0x2232c6, + 0x32cb85, + 0x283a47, + 0x2f1c03, + 0x366e07, + 0x35ba05, + 0x212644, + 0x214382, + 0x267207, + 0x379104, + 0x238bc4, + 0x25a14b, + 0x21fdc3, + 0x286807, + 0x21fdc4, + 0x2a4c87, + 0x22ac83, + 0x32e98d, + 0x38a888, + 0x2511c4, + 0x2511c5, + 0x301705, + 0x2ff303, + 0x68214a02, + 0x300a03, + 0x301143, + 0x399f04, + 0x27e585, + 0x21eec7, + 0x213446, + 0x36f643, + 0x32b34b, + 0x32f70b, + 0x27338b, + 0x27e68a, + 0x2a55cb, + 0x2caf0b, + 0x2d33cc, + 0x2f8711, + 0x33d50a, + 0x35030b, + 0x37a40b, + 0x3aef8a, + 0x3b0f8a, + 0x301b0d, + 0x3032ce, + 0x30438b, + 0x30464a, + 0x305811, + 0x305c4a, + 0x30614b, + 0x30668e, + 0x30720c, + 0x3075cb, + 0x30788e, + 0x307c0c, + 0x3094ca, + 0x30a6cc, + 0x6870a9ca, + 0x30bbc9, + 0x30dd0a, + 0x30df8a, + 0x30e20b, + 0x31014e, + 0x3104d1, + 0x319509, + 0x31974a, + 0x31a00b, + 0x31baca, + 0x31c656, + 0x31de0b, + 0x3200ca, + 0x320dca, + 0x32528b, + 0x329a09, + 0x32f309, + 0x33154d, + 0x331dcb, + 0x332b0b, + 0x3334cb, + 0x333c89, + 0x3342ce, + 
0x3346ca, + 0x335b0a, + 0x33620a, + 0x33698b, + 0x3371cb, + 0x33748d, + 0x338b0d, + 0x339950, + 0x339e0b, + 0x33b3cc, + 0x33c04b, + 0x33dc0b, + 0x33fb8b, + 0x34814b, + 0x348bcf, + 0x348f8b, + 0x349bca, + 0x34a2c9, + 0x34a709, + 0x34b0cb, + 0x34b38e, + 0x34e10b, + 0x34eecf, + 0x3512cb, + 0x35158b, + 0x35184b, + 0x351c8a, + 0x355889, + 0x35898f, + 0x36104c, + 0x36150c, + 0x36278e, + 0x362fcf, + 0x36338e, + 0x363e90, + 0x36428f, + 0x36574e, + 0x365c0c, + 0x365f12, + 0x367b11, + 0x3680ce, + 0x36850e, + 0x368a4e, + 0x368dcf, + 0x36918e, + 0x369513, + 0x3699d1, + 0x369e0e, + 0x36a28c, + 0x36ad53, + 0x36b550, + 0x36c18c, + 0x36c48c, + 0x36c94b, + 0x36dfce, + 0x36e64b, + 0x36ea8b, + 0x37090c, + 0x37954a, + 0x379c0c, + 0x379f0c, + 0x37a209, + 0x37b60b, + 0x37b8c8, + 0x37bac9, + 0x37bacf, + 0x37d40b, + 0x37e10a, + 0x38154c, + 0x383409, + 0x3837c8, + 0x384bcb, + 0x3852cb, + 0x38644a, + 0x3866cb, + 0x386c0c, + 0x387948, + 0x38aa8b, + 0x38d14b, + 0x39028b, + 0x391b0b, + 0x39b50b, + 0x39b7c9, + 0x39bd0d, + 0x3a0f8a, + 0x3a1ed7, + 0x3a3b58, + 0x3a8a09, + 0x3a9c0b, + 0x3aa3d4, + 0x3aa8cb, + 0x3aae4a, + 0x3ab2ca, + 0x3ab54b, + 0x3ac450, + 0x3ac851, + 0x3ad10a, + 0x3ae58d, + 0x3aec8d, + 0x3b134b, + 0x3b2746, + 0x2226c3, + 0x68a5b343, + 0x385d06, + 0x28e605, + 0x2d6487, + 0x33d3c6, + 0x1627342, + 0x2ad589, + 0x27a3c4, + 0x2d0348, + 0x245a03, + 0x2e7ac7, + 0x22eb82, + 0x2acc83, + 0x68e006c2, + 0x2c2686, + 0x2c36c4, + 0x328a44, + 0x23e083, + 0x23e085, + 0x696cd7c2, + 0x2dba04, + 0x2760c7, + 0x1662e02, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x2020c3, + 0x200882, + 0x77a48, + 0x206a82, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x20b803, + 0x3152d6, + 0x318493, + 0x30c449, + 0x324308, + 0x3a05c9, + 0x2fd0c6, + 0x30d250, + 0x303e13, + 0x208888, + 0x2407c7, + 0x27c647, + 0x29f48a, + 0x2f9509, + 0x34cf89, + 0x290ccb, + 0x340c06, + 0x32324a, + 0x222986, + 0x279fc3, + 0x2f1ec5, + 0x226b48, + 0x2365cd, + 0x35988c, + 0x238507, + 0x304e0d, + 0x2036c4, + 0x22ff8a, + 0x2308ca, + 0x230d8a, + 0x304107, + 0x23cfc7, + 0x23ff44, + 0x22f246, + 0x340fc4, + 0x2f0448, + 0x2aaa09, + 0x2b9906, + 0x2b9908, + 0x24328d, + 0x2c0a89, + 0x204188, + 0x25b147, + 0x20150a, + 0x24c286, + 0x259f07, + 0x2b1e04, + 0x248087, + 0x33a34a, + 0x25958e, + 0x210705, + 0x2ff04b, + 0x2f2509, + 0x22dec9, + 0x2a6347, + 0x3a278a, + 0x2b8007, + 0x2f2b09, + 0x359d48, + 0x3141cb, + 0x2cff85, + 0x2e038a, + 0x21ec49, + 0x32688a, + 0x2c384b, + 0x247f8b, + 0x290a55, + 0x2d59c5, + 0x25b1c5, + 0x2e24ca, + 0x2501ca, + 0x376507, + 0x220283, + 0x29e308, + 0x2cb24a, + 0x3abc86, + 0x241949, + 0x26ae48, + 0x2d9544, + 0x235c49, + 0x2b90c8, + 0x2dcd87, + 0x27dbc6, + 0x2a2c87, + 0x297b87, + 0x23f985, + 0x25388c, + 0x2511c5, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x206a82, + 0x22bf83, + 0x24c083, + 0x2020c3, + 0x204703, + 0x22bf83, + 0x24c083, + 0x222f43, + 0x204703, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x77a48, + 0x206a82, + 0x200e42, + 0x230fc2, + 0x207382, + 0x203202, + 0x2d3cc2, + 0x462bf83, + 0x231b03, + 0x20f583, + 0x250cc3, + 0x202243, + 0x220ec3, + 0x24c083, + 0x204703, + 0x232dc3, + 0x77a48, + 0x329904, + 0x25fe87, + 0x262203, + 0x3395c4, + 0x22ea83, + 0x286c03, + 0x250cc3, + 0x200882, + 0x127883, + 0x5606a82, + 0x230fc2, + 0x23c4, + 0x200fc2, + 0xe1c44, + 0x77a48, + 0x20e503, + 0x2cd683, + 0x5e2bf83, + 0x22ff84, + 0x6231b03, + 0x6650cc3, + 0x20b542, + 0x2023c4, + 0x24c083, + 0x2f1d03, + 0x2018c2, + 0x204703, + 0x21f0c2, + 0x2e9943, + 0x202942, + 0x207703, 
+ 0x26af03, + 0x201d02, + 0x77a48, + 0x20e503, + 0x2f1d03, + 0x2018c2, + 0x2e9943, + 0x202942, + 0x207703, + 0x26af03, + 0x201d02, + 0x2e9943, + 0x202942, + 0x207703, + 0x26af03, + 0x201d02, + 0x22bf83, + 0x327883, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x2023c4, + 0x202243, + 0x220ec3, + 0x211004, + 0x24c083, + 0x204703, + 0x209202, + 0x21d603, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x327883, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x2023c4, + 0x24c083, + 0x204703, + 0x2ebf05, + 0x212dc2, + 0x200882, + 0x77a48, + 0x250cc3, + 0x260e41, + 0x20bd81, + 0x260e01, + 0x20bb01, + 0x275d81, + 0x275e41, + 0x262281, + 0x24b581, + 0x2f8901, + 0x301dc1, + 0x200141, + 0x200001, + 0x77a48, + 0x200481, + 0x200741, + 0x200081, + 0x201501, + 0x2007c1, + 0x200901, + 0x200041, + 0x202381, + 0x2001c1, + 0x2000c1, + 0x200341, + 0x200cc1, + 0x200fc1, + 0x200ac1, + 0x213041, + 0x200c01, + 0x200241, + 0x200a01, + 0x2002c1, + 0x200281, + 0x201d01, + 0x2041c1, + 0x200781, + 0x200641, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x200fc2, + 0x204703, + 0x142b87, + 0x1c106, + 0x18a4a, + 0x89808, + 0x51688, + 0x51a47, + 0x60f46, + 0xcdfc5, + 0x62145, + 0x72606, + 0x122706, + 0x223504, + 0x3212c7, + 0x77a48, + 0x2c8144, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 0x20f583, + 0x250cc3, + 0x202243, + 0x220ec3, + 0x24c083, + 0x204703, + 0x212dc2, + 0x2b6f03, + 0x214583, + 0x279643, + 0x202a82, + 0x249c83, + 0x201e03, + 0x2056c3, + 0x200001, + 0x2075c3, + 0x276844, + 0x3355c3, + 0x30da43, + 0x21eb83, + 0x378d83, + 0xa22bf83, + 0x234a44, + 0x21eb43, + 0x22e0c3, + 0x231b03, + 0x231843, + 0x211a83, + 0x2a2383, + 0x30d9c3, + 0x226083, + 0x2143c3, + 0x24ce04, + 0x23d6c2, + 0x250f43, + 0x2579c3, + 0x279003, + 0x260d83, + 0x345903, + 0x250cc3, + 0x2e87c3, + 0x2037c3, + 0x2023c3, + 0x249283, + 0x35d7c3, + 0x300b83, + 0x387883, + 0x200983, + 0x232003, + 0x220ec3, + 0x21e782, + 0x28a503, + 0x24c083, + 0x16020c3, + 0x255bc3, + 0x232943, + 0x212c03, + 0x204703, + 0x20b103, + 0x21d603, + 0x23b303, + 0x2f8183, + 0x2e9b03, + 0x303b85, + 0x2298c3, + 0x2e9b43, + 0x2eb283, + 0x2133c4, + 0x25a903, + 0x32be83, + 0x277083, + 0x232dc3, + 0x212dc2, + 0x239cc3, + 0x2fb8c4, + 0x238bc4, + 0x24cd43, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x206a82, + 0x204703, + 0xb62bf83, + 0x250cc3, + 0x220ec3, + 0x20dd02, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x6c2, + 0x201142, + 0x223342, + 0x77a48, + 0x6a82, + 0x2338c2, + 0x207082, + 0x23eac2, + 0x202402, + 0x209142, + 0x62145, + 0x202702, + 0x2018c2, + 0x207902, + 0x201702, + 0x200ac2, + 0x386d02, + 0x203a02, + 0x227d82, + 0x117c0d, + 0xed209, + 0x4a30b, + 0xd1d48, + 0x472c9, + 0x250cc3, + 0x77a48, + 0x77a48, + 0x52946, + 0x200882, + 0x223504, + 0x206a82, + 0x22bf83, + 0x200e42, + 0x231b03, + 0x20f582, + 0x2c8144, + 0x202243, + 0x20ad82, + 0x24c083, + 0x200fc2, + 0x204703, + 0x25b1c6, + 0x30e7cf, + 0x701683, + 0x77a48, + 0x206a82, + 0x20f583, + 0x250cc3, + 0x220ec3, + 0x1479b8b, + 0x206a82, + 0x22bf83, + 0x250cc3, + 0x24c083, + 0x200882, + 0x206b82, + 0x20a882, + 0xea2bf83, + 0x23e902, + 0x231b03, + 0x249242, + 0x225902, + 0x250cc3, + 0x224242, + 0x24b342, + 0x24cd02, + 0x204242, + 0x28cf02, + 0x205302, + 0x200902, + 0x210442, + 0x20b882, + 0x217e82, + 0x2ad442, + 0x23ba82, + 0x312042, + 0x24dcc2, + 0x220ec3, + 0x202ac2, + 0x24c083, + 
0x243482, + 0x273342, + 0x204703, + 0x249d02, + 0x203682, + 0x24ecc2, + 0x201f82, + 0x2198c2, + 0x2d3382, + 0x21bf02, + 0x244302, + 0x222742, + 0x30464a, + 0x349bca, + 0x37f58a, + 0x3b28c2, + 0x20b642, + 0x245d02, + 0xeeaef89, + 0xf26050a, + 0xf42e107, + 0xbac2, + 0x6050a, + 0x247204, + 0xfe2bf83, + 0x231b03, + 0x248fc4, + 0x250cc3, + 0x2023c4, + 0x202243, + 0x220ec3, + 0x24c083, + 0x2020c3, + 0x204703, + 0x2298c3, + 0x2232c3, + 0x77a48, + 0x1460ec4, + 0x60745, + 0x5f68a, + 0x10a642, + 0x17e606, + 0x106aef89, + 0x142d47, + 0x1e02, + 0x1ab7ca, + 0xda987, + 0x77a48, + 0xfff08, + 0xd8c7, + 0x1181d10b, + 0x3482, + 0x1a0947, + 0xdc0a, + 0x19f20f, + 0x124b4f, + 0x1eb82, + 0x6a82, + 0xa22c8, + 0xec94a, + 0x143f48, + 0x1582, + 0x13564b, + 0x16fcc8, + 0x7f087, + 0xdaa8a, + 0x58a4b, + 0x172cc9, + 0x16fbc7, + 0xf564c, + 0xb587, + 0xd0b0a, + 0x14bcc8, + 0xf20ce, + 0x5360e, + 0xda7cb, + 0x17664b, + 0xecf4b, + 0x1c109, + 0x1df4b, + 0x22d8d, + 0x24b0b, + 0x277cd, + 0x2b70d, + 0x12c9ca, + 0x38a0b, + 0x5910b, + 0x67505, + 0x10b510, + 0x14338f, + 0xe37cf, + 0x1e34d, + 0x76650, + 0x8b02, + 0x11f24008, + 0x142a08, + 0x122e4205, + 0x47d0b, + 0x4f508, + 0x17680a, + 0x58189, + 0x625c7, + 0x62907, + 0x62ac7, + 0x656c7, + 0x660c7, + 0x663c7, + 0x67807, + 0x68687, + 0x69007, + 0x691c7, + 0x6a487, + 0x6a647, + 0x6a807, + 0x6a9c7, + 0x6acc7, + 0x6b347, + 0x6c307, + 0x6c8c7, + 0x6d087, + 0x6d807, + 0x6d9c7, + 0x6ddc7, + 0x6e307, + 0x6e507, + 0x6e7c7, + 0x6e987, + 0x6eb47, + 0x6f087, + 0x6fa87, + 0x70547, + 0x72d47, + 0x73007, + 0x73647, + 0x73807, + 0x73b87, + 0x749c7, + 0x74c47, + 0x75047, + 0x758c7, + 0x75a87, + 0x75ec7, + 0x76c07, + 0x76f07, + 0x77147, + 0x77307, + 0x77687, + 0x78007, + 0xd502, + 0x44c4a, + 0xf8407, + 0x124c8a0b, + 0x14c8a16, + 0x1bb11, + 0xdf0ca, + 0xa214a, + 0x52946, + 0x18e90b, + 0x10702, + 0x184551, + 0x99ac9, + 0x92dc9, + 0x10442, + 0x9ec8a, + 0xa3849, + 0xa3f4f, + 0xa48ce, + 0xa5408, + 0x134c2, + 0x799c9, + 0x1779ce, + 0xac08c, + 0xd438f, + 0x194a8e, + 0x1378c, + 0x18549, + 0x19451, + 0x1ae88, + 0x13ab12, + 0x12bb8d, + 0x2f10d, + 0x398cb, + 0x43755, + 0x44b09, + 0x4540a, + 0x57b49, + 0x5bb90, + 0x6a1cb, + 0x7be8f, + 0x7ce4b, + 0x8048c, + 0x80e50, + 0x85a0a, + 0x8a3cd, + 0x13f80e, + 0x14aa0a, + 0x8f30c, + 0x97854, + 0x99751, + 0x9cc0b, + 0x9de8f, + 0xab18d, + 0xaba0e, + 0xdcc4c, + 0x15eacc, + 0xdc94b, + 0xe8a4e, + 0xeb550, + 0x12e34b, + 0x16a70d, + 0xb48cf, + 0xb83cc, + 0xb978e, + 0xb9f91, + 0xbbd0c, + 0x119e47, + 0xc174d, + 0xc60cc, + 0xd5c50, + 0xe608d, + 0xfc987, + 0xeecd0, + 0xf3d88, + 0xf494b, + 0x16f84f, + 0x15bd88, + 0xdf2cd, + 0x173dd0, + 0xafec3, + 0xac82, + 0x2bb09, + 0x5340a, + 0xfb906, + 0x128de7c9, + 0x11e03, + 0x10ad11, + 0xccf47, + 0xd36d0, + 0xd3b8c, + 0xd4d85, + 0x1189c8, + 0x19c9ca, + 0x1976c7, + 0x1042, + 0x6184a, + 0xe3b09, + 0x34aca, + 0x19ef8f, + 0x4160b, + 0x1283cc, + 0x128692, + 0xadd85, + 0x161b4a, + 0x12ee1545, + 0x1132c3, + 0x186d02, + 0xe9e4a, + 0xcfc88, + 0x124ac7, + 0x34c2, + 0xed42, + 0x2942, + 0x1a7a10, + 0x4042, + 0x2e9cf, + 0x72606, + 0x176c8e, + 0xd7c0b, + 0x14ac08, + 0xc9d49, + 0x17cbd2, + 0x404d, + 0x496c8, + 0x4a1c9, + 0x4c40d, + 0x4e7c9, + 0x52a8b, + 0x55d88, + 0x5f4c8, + 0x67f88, + 0x68209, + 0x6840a, + 0x6898c, + 0xea08a, + 0xf81c7, + 0x1684d, + 0xed84b, + 0x7a1cc, + 0x65910, + 0x1bc2, + 0xd65cd, + 0x3f02, + 0x7942, + 0xf810a, + 0xdefca, + 0xe79cb, + 0x592cc, + 0xffc8e, + 0x199fcd, + 0xf2f88, + 0x6c2, + 0x10b6778e, + 0x10c2e107, + 0x111ab089, + 0x129c3, + 0x1171b7cc, + 0xbac2, + 0x146151, + 0x1676d1, + 0x176fd1, + 0x131111, + 0x11b70f, + 0x11fdcc, + 
0x124f4d, + 0x15c8cd, + 0x16da95, + 0xbacc, + 0x50b50, + 0x1091cc, + 0x10f6cc, + 0x4f2c9, + 0xbac2, + 0x14620e, + 0x16778e, + 0x17708e, + 0x1311ce, + 0x11b7cc, + 0x11fe89, + 0xbb89, + 0x50c0d, + 0x109289, + 0x10f789, + 0x158543, + 0x1892c3, + 0xbac2, + 0xd2d05, + 0x1ab7c4, + 0x135a04, + 0x181444, + 0x17e004, + 0x17a784, + 0x142d44, + 0x1424703, + 0x1416703, + 0xf2b84, + 0x8b02, + 0x199fc3, + 0x200882, + 0x206a82, + 0x200e42, + 0x209342, + 0x20f582, + 0x200fc2, + 0x202942, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x2023c3, + 0x24c083, + 0x204703, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x24c083, + 0x204703, + 0x39f83, + 0x250cc3, + 0x200882, + 0x327883, + 0x14a2bf83, + 0x380d87, + 0x250cc3, + 0x39a883, + 0x211004, + 0x24c083, + 0x204703, + 0x24e9ca, + 0x25b1c5, + 0x21d603, + 0x2012c2, + 0x77a48, + 0x77a48, + 0x6a82, + 0x110842, + 0x1a0a85, + 0x77a48, + 0x2bf83, + 0xf2447, + 0xcd44f, + 0xfb984, + 0x172e4a, + 0xabcc7, + 0x18908a, + 0x18ed8a, + 0xfb906, + 0x8a4d, + 0x127883, + 0x77a48, + 0x6a82, + 0x48fc4, + 0x86d83, + 0xebf05, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x201e03, + 0x22bf83, + 0x231b03, + 0x20f583, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x292f83, + 0x2232c3, + 0x201e03, + 0x223504, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x22a543, + 0x22bf83, + 0x231b03, + 0x210c43, + 0x20f583, + 0x250cc3, + 0x2023c4, + 0x265603, + 0x232003, + 0x220ec3, + 0x24c083, + 0x204703, + 0x21d603, + 0x200a43, + 0x16e2bf83, + 0x231b03, + 0x245e83, + 0x250cc3, + 0x2805c3, + 0x232003, + 0x204703, + 0x208583, + 0x325ec4, + 0x77a48, + 0x1762bf83, + 0x231b03, + 0x2a54c3, + 0x250cc3, + 0x220ec3, + 0x211004, + 0x24c083, + 0x204703, + 0x220303, + 0x77a48, + 0x17e2bf83, + 0x231b03, + 0x20f583, + 0x2020c3, + 0x204703, + 0x77a48, + 0x142e107, + 0x327883, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x2023c4, + 0x211004, + 0x24c083, + 0x204703, + 0x175d04, + 0x340dc5, + 0x77a48, + 0x742, + 0x33303, + 0x2cf588, + 0x23ca87, + 0x223504, + 0x366b06, + 0x36d946, + 0x77a48, + 0x23bd43, + 0x2e31c9, + 0x2b3f55, + 0xb3f5f, + 0x22bf83, + 0x334fd2, + 0x1011c6, + 0x13b685, + 0x17680a, + 0x58189, + 0x334d8f, + 0x2c8144, + 0x23c485, + 0x35d590, + 0x324507, + 0x2020c3, + 0x255bc8, + 0x2d2d8a, + 0x241204, + 0x2e0f83, + 0x25b1c6, + 0x2012c2, + 0x387d0b, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x2e8283, + 0x206a82, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x204703, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x39a883, + 0x206f83, + 0x204703, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x24c083, + 0x204703, + 0x200882, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x223504, + 0x22bf83, + 0x231b03, + 0x30db04, + 0x24c083, + 0x204703, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 0x20f583, + 0x2037c3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x2614c3, + 0x6ab03, + 0x19a883, + 0x24c083, + 0x204703, + 0x30464a, + 0x31c409, + 0x33e2cb, + 0x33e94a, + 0x349bca, + 0x356a0b, + 0x36f44a, + 0x37954a, + 0x37f58a, + 0x37f80b, + 0x39c709, + 0x39e40a, + 0x39e94b, + 0x3aab8b, + 0x3b0d4a, + 0x22bf83, + 0x231b03, + 0x20f583, + 0x220ec3, + 0x24c083, + 0x204703, + 0x77a48, + 0x22bf83, + 0x2625c4, + 0x219242, + 0x211004, + 0x278b45, + 0x201e03, + 0x223504, + 0x22bf83, + 0x234a44, + 0x231b03, + 0x248fc4, + 0x2c8144, + 0x2023c4, + 0x232003, + 0x24c083, + 0x204703, + 0x297985, + 
0x22a543, + 0x21d603, + 0x25ad03, + 0x2512c4, + 0x260e04, + 0x279645, + 0x77a48, + 0x2fa644, + 0x221c46, + 0x287e44, + 0x206a82, + 0x364707, + 0x24b847, + 0x246a04, + 0x2569c5, + 0x2e6285, + 0x2a8e05, + 0x2023c4, + 0x316208, + 0x2031c6, + 0x2e6ec8, + 0x23e385, + 0x2cff85, + 0x214984, + 0x204703, + 0x2e1c44, + 0x355bc6, + 0x25b2c3, + 0x2512c4, + 0x269bc5, + 0x233504, + 0x399e44, + 0x2012c2, + 0x24ec06, + 0x392506, + 0x2f6c05, + 0x200882, + 0x327883, + 0x1d606a82, + 0x233004, + 0x20f582, + 0x220ec3, + 0x20b602, + 0x24c083, + 0x200fc2, + 0x20b803, + 0x2232c3, + 0x77a48, + 0x77a48, + 0x250cc3, + 0x200882, + 0x1e206a82, + 0x250cc3, + 0x26a783, + 0x265603, + 0x31d184, + 0x24c083, + 0x204703, + 0x77a48, + 0x200882, + 0x1ea06a82, + 0x22bf83, + 0x24c083, + 0x204703, + 0x20f042, + 0x212dc2, + 0x39a883, + 0x2d9f83, + 0x200882, + 0x77a48, + 0x206a82, + 0x231b03, + 0x248fc4, + 0x209d03, + 0x250cc3, + 0x2037c3, + 0x220ec3, + 0x24c083, + 0x21a883, + 0x204703, + 0x220283, + 0x125513, + 0x134914, + 0x145c6, + 0x1c106, + 0x514c7, + 0x7a709, + 0x141c0a, + 0x896cd, + 0x11790c, + 0x17ef0a, + 0x62145, + 0x16d408, + 0x72606, + 0x122706, + 0x208b02, + 0x1ab987, + 0x22bf83, + 0xd0a85, + 0x1bb06, + 0x8d1ca, + 0xacf83, + 0x7a6c5, + 0xd003, + 0x18e9cc, + 0x1ade48, + 0x13f348, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x200882, + 0x206a82, + 0x250cc3, + 0x20b542, + 0x24c083, + 0x204703, + 0x20b803, + 0x362fcf, + 0x36338e, + 0x77a48, + 0x22bf83, + 0x43f87, + 0x231b03, + 0x250cc3, + 0x202243, + 0x24c083, + 0x204703, + 0x21fc03, + 0x264fc7, + 0x201c42, + 0x291d49, + 0x200ec2, + 0x3a7d0b, + 0x28b2ca, + 0x28cc09, + 0x200d82, + 0x261046, + 0x254b15, + 0x3a7e55, + 0x257393, + 0x3a83d3, + 0x204a42, + 0x20c905, + 0x32178c, + 0x21b24b, + 0x2543c5, + 0x20b0c2, + 0x287302, + 0x3747c6, + 0x201e02, + 0x25f986, + 0x34be4d, + 0x36fe4c, + 0x30b584, + 0x2009c2, + 0x209ec2, + 0x33aec8, + 0x204a02, + 0x32f986, + 0x2d2944, + 0x254cd5, + 0x257513, + 0x212903, + 0x34b6ca, + 0x35af47, + 0x2e7c09, + 0x229087, + 0x305b42, + 0x200002, + 0x200006, + 0x206e82, + 0x77a48, + 0x212742, + 0x212bc2, + 0x3994c7, + 0x35bac7, + 0x21f085, + 0x203482, + 0x220247, + 0x220408, + 0x23a242, + 0x2715c2, + 0x22ce42, + 0x201482, + 0x300cc8, + 0x21a903, + 0x286f48, + 0x2c77cd, + 0x214683, + 0x2e3f48, + 0x23218f, + 0x23254e, + 0x22338a, + 0x299e51, + 0x29a2d0, + 0x2b2d0d, + 0x2b304c, + 0x20e607, + 0x34b847, + 0x366bc9, + 0x245bc2, + 0x2004c2, + 0x252ecc, + 0x2531cb, + 0x2008c2, + 0x2dcb06, + 0x2092c2, + 0x2036c2, + 0x21eb82, + 0x206a82, + 0x3929c4, + 0x23a547, + 0x208942, + 0x23fac7, + 0x241007, + 0x217442, + 0x20e542, + 0x243e45, + 0x200682, + 0x26794e, + 0x27d94d, + 0x231b03, + 0x377f8e, + 0x2dc3cd, + 0x229343, + 0x203982, + 0x209f44, + 0x245b82, + 0x201502, + 0x34a4c5, + 0x351ac7, + 0x36ed02, + 0x209342, + 0x248847, + 0x24d248, + 0x23d6c2, + 0x2ade06, + 0x252d4c, + 0x25308b, + 0x20db02, + 0x25c18f, + 0x25c550, + 0x25c94f, + 0x25cd15, + 0x25d254, + 0x25d74e, + 0x25dace, + 0x25de4f, + 0x25e20e, + 0x25e594, + 0x25ea93, + 0x25ef4d, + 0x2781c9, + 0x28a283, + 0x2007c2, + 0x31a605, + 0x209d06, + 0x20f582, + 0x270387, + 0x250cc3, + 0x210702, + 0x235e48, + 0x29a091, + 0x29a4d0, + 0x200942, + 0x21e747, + 0x203fc2, + 0x2cec87, + 0x20ac82, + 0x2cf389, + 0x374787, + 0x34aec8, + 0x2261c6, + 0x2d9e83, + 0x322945, + 0x231d82, + 0x200402, + 0x200405, + 0x22aa05, + 0x200f02, + 0x233583, + 0x233587, + 0x200f07, + 0x2013c2, + 0x301344, + 0x2025c3, + 0x2bfb89, + 0x2da648, + 0x217382, + 0x203702, + 0x222047, + 0x224605, + 0x2a4248, + 0x20c5c7, + 
0x201dc3, + 0x2a1b86, + 0x2b2b8d, + 0x2b2f0c, + 0x27e146, + 0x207082, + 0x206a42, + 0x20f842, + 0x23200f, + 0x23240e, + 0x2e6307, + 0x200342, + 0x30a2c5, + 0x30a2c6, + 0x250702, + 0x202ac2, + 0x215346, + 0x291f83, + 0x2cebc6, + 0x2c1105, + 0x2c110d, + 0x2c1c55, + 0x2c240c, + 0x2c2c0d, + 0x2c32d2, + 0x203642, + 0x208b82, + 0x201842, + 0x2e4f06, + 0x2abf46, + 0x201042, + 0x209d86, + 0x207902, + 0x223d85, + 0x203202, + 0x267a89, + 0x27074c, + 0x270a8b, + 0x200fc2, + 0x24d688, + 0x20cb42, + 0x209242, + 0x21b006, + 0x3683c5, + 0x21c307, + 0x253b45, + 0x299cc5, + 0x244002, + 0x322882, + 0x200ac2, + 0x27c187, + 0x2d0e4d, + 0x2d11cc, + 0x275587, + 0x20b142, + 0x224742, + 0x242988, + 0x22bc88, + 0x2d57c8, + 0x2df284, + 0x2e8cc7, + 0x2db883, + 0x2aff02, + 0x20d102, + 0x2dfa09, + 0x3a4507, + 0x208582, + 0x273cc5, + 0x242242, + 0x22e1c2, + 0x27b6c3, + 0x27b6c6, + 0x2e8282, + 0x2e98c2, + 0x200d42, + 0x30c286, + 0x209e87, + 0x201442, + 0x203942, + 0x286d8f, + 0x377dcd, + 0x35914e, + 0x2dc24c, + 0x204742, + 0x205fc2, + 0x226005, + 0x3b1146, + 0x214442, + 0x201002, + 0x2034c2, + 0x20c544, + 0x2c7644, + 0x338546, + 0x202942, + 0x27c9c7, + 0x224d83, + 0x226708, + 0x228048, + 0x32ba07, + 0x22ed46, + 0x202742, + 0x238f03, + 0x23ce07, + 0x26e186, + 0x2e4e45, + 0x3497c8, + 0x209642, + 0x321e87, + 0x20b702, + 0x2eab42, + 0x204002, + 0x2df7c9, + 0x200242, + 0x200a02, + 0x275803, + 0x3a0007, + 0x201f02, + 0x2708cc, + 0x270bcb, + 0x27e1c6, + 0x20cf45, + 0x21c642, + 0x205702, + 0x2b2886, + 0x26ba43, + 0x357a07, + 0x249842, + 0x205a02, + 0x254995, + 0x3a8015, + 0x257253, + 0x3a8553, + 0x269cc7, + 0x277c08, + 0x277c10, + 0x278c4f, + 0x28b093, + 0x28c9d2, + 0x291910, + 0x2a248f, + 0x2a8992, + 0x2fcb11, + 0x2f4bd3, + 0x353a92, + 0x320a0f, + 0x2bb04e, + 0x2c0c92, + 0x2c8f51, + 0x2cb48f, + 0x2cc20e, + 0x2cd9d1, + 0x2fbb90, + 0x2db252, + 0x2df5d1, + 0x2e5346, + 0x2e6b07, + 0x2f9347, + 0x202c42, + 0x281b85, + 0x3471c7, + 0x212dc2, + 0x206d02, + 0x229585, + 0x2212c3, + 0x2798c6, + 0x2d100d, + 0x2d134c, + 0x201242, + 0x32160b, + 0x21b10a, + 0x2eae8a, + 0x2b1649, + 0x2dde0b, + 0x20c70d, + 0x362b4c, + 0x224f0a, + 0x22a18c, + 0x24470b, + 0x33bc8c, + 0x25424b, + 0x2706c3, + 0x277806, + 0x2ce482, + 0x2ea342, + 0x221f83, + 0x204102, + 0x204c03, + 0x2562c6, + 0x25cec7, + 0x26d686, + 0x2ecb08, + 0x22b988, + 0x2f3906, + 0x20c402, + 0x2f65cd, + 0x2f690c, + 0x2c8207, + 0x2fa507, + 0x214602, + 0x2342c2, + 0x23cd82, + 0x266dc2, + 0x206a82, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x211004, + 0x24c083, + 0x204703, + 0x20b803, + 0x200882, + 0x200702, + 0x21a8f085, + 0x21e07f05, + 0x2230bf46, + 0x77a48, + 0x226ae4c5, + 0x206a82, + 0x200e42, + 0x22b41485, + 0x22e80385, + 0x23281247, + 0x23615009, + 0x23a58804, + 0x20f582, + 0x210702, + 0x23f6ed45, + 0x24291589, + 0x24772b48, + 0x24aac5c5, + 0x24ebf307, + 0x2521f7c8, + 0x256d8045, + 0x25a01c06, + 0x25e71249, + 0x263ac248, + 0x266b9dc8, + 0x26a981ca, + 0x26e4a044, + 0x272c96c5, + 0x276b5708, + 0x27a6b485, + 0x21a982, + 0x27e00343, + 0x282a14c6, + 0x28645248, + 0x28a067c6, + 0x28ece688, + 0x29321ac6, + 0x29731b84, + 0x202b42, + 0x29a3bf07, + 0x29ea6f84, + 0x2a27bc47, + 0x2a713c87, + 0x200fc2, + 0x2aa98f45, + 0x2af24244, + 0x2b2fc647, + 0x2b623747, + 0x2ba85546, + 0x2be5b885, + 0x2c293d47, + 0x2c6d5388, + 0x2cb1a987, + 0x2cf6ab09, + 0x2d38e705, + 0x2d75e547, + 0x2da8e7c6, + 0x2de61248, + 0x33a60d, + 0x244189, + 0x2e430b, + 0x24b38b, + 0x275c0b, + 0x2a3a4b, + 0x30220b, + 0x3024cb, + 0x302d49, + 0x3048cb, + 0x304b8b, + 0x3053cb, + 0x305eca, + 0x30640a, + 0x306a0c, + 
0x309b0b, + 0x30a44a, + 0x3199ca, + 0x32aa8e, + 0x32dace, + 0x32de4a, + 0x33188a, + 0x3323cb, + 0x33268b, + 0x33320b, + 0x34e64b, + 0x34ec4a, + 0x34f90b, + 0x34fbca, + 0x34fe4a, + 0x3500ca, + 0x37110b, + 0x37a88b, + 0x37c1ce, + 0x37c54b, + 0x38618b, + 0x3874cb, + 0x38ad4a, + 0x38afc9, + 0x38b20a, + 0x38c88a, + 0x39d0cb, + 0x39ec0b, + 0x39fa8a, + 0x3a120b, + 0x3a728b, + 0x3b078b, + 0x2e283308, + 0x2e688e49, + 0x2eb61e09, + 0x2eed0348, + 0x3382c5, + 0x207503, + 0x202d44, + 0x3996c5, + 0x258546, + 0x266545, + 0x288644, + 0x270288, + 0x218d45, + 0x290444, + 0x205c87, + 0x29c18a, + 0x3411ca, + 0x387307, + 0x2078c7, + 0x2f47c7, + 0x327dc7, + 0x2b6085, + 0x323d06, + 0x33f687, + 0x26dcc4, + 0x320686, + 0x3a6bc6, + 0x2017c5, + 0x24fa04, + 0x2c0406, + 0x29b587, + 0x32c646, + 0x305107, + 0x27f203, + 0x24e386, + 0x230705, + 0x281347, + 0x2bda0a, + 0x235f44, + 0x21b888, + 0x2eb949, + 0x2d1b07, + 0x330c06, + 0x327b88, + 0x314009, + 0x2396c4, + 0x3619c4, + 0x2fb3c5, + 0x210f08, + 0x2be207, + 0x2a9209, + 0x231548, + 0x2feb46, + 0x243586, + 0x2965c8, + 0x3739c6, + 0x207f05, + 0x285606, + 0x27c348, + 0x231f06, + 0x251f0b, + 0x2343c6, + 0x297d4d, + 0x3a26c5, + 0x2a6e46, + 0x2065c5, + 0x29ae89, + 0x32a347, + 0x3825c8, + 0x2d5046, + 0x296cc9, + 0x3a0486, + 0x2bd985, + 0x237446, + 0x2a8406, + 0x2c47c9, + 0x2394c6, + 0x248547, + 0x2d9045, + 0x203203, + 0x252085, + 0x298007, + 0x327706, + 0x3a25c9, + 0x30bf46, + 0x285846, + 0x205149, + 0x285009, + 0x29f347, + 0x322b08, + 0x28dc89, + 0x281808, + 0x31ce86, + 0x2cc985, + 0x30b24a, + 0x2858c6, + 0x380c06, + 0x2a1685, + 0x3843c8, + 0x2109c7, + 0x22f98a, + 0x249406, + 0x2f9a85, + 0x330e86, + 0x263ec7, + 0x330ac7, + 0x2ef245, + 0x2bdb45, + 0x29f6c6, + 0x2ad006, + 0x383a06, + 0x333b84, + 0x2840c9, + 0x289d06, + 0x35104a, + 0x21a588, + 0x35e248, + 0x3411ca, + 0x3a3505, + 0x29b4c5, + 0x385b88, + 0x2c9448, + 0x36d747, + 0x211206, + 0x312e08, + 0x2e4947, + 0x2837c8, + 0x36a5c6, + 0x286148, + 0x2b3406, + 0x23e507, + 0x297706, + 0x2c0406, + 0x233bca, + 0x392a46, + 0x2cc989, + 0x2ae7c6, + 0x2d224a, + 0x331b89, + 0x2f3a06, + 0x37ac04, + 0x31a6cd, + 0x2890c7, + 0x3157c6, + 0x2b9c85, + 0x3a0505, + 0x31abc6, + 0x274209, + 0x2b1c47, + 0x27d406, + 0x2cd2c6, + 0x2886c9, + 0x2bf4c4, + 0x22c784, + 0x2073c8, + 0x256686, + 0x273d88, + 0x2373c8, + 0x282fc7, + 0x200849, + 0x383c07, + 0x2ae38a, + 0x236d8f, + 0x2463ca, + 0x225e05, + 0x27c585, + 0x21ac05, + 0x2d2887, + 0x20e243, + 0x322d08, + 0x2f7206, + 0x2f7309, + 0x2b0106, + 0x2c3107, + 0x296a89, + 0x3824c8, + 0x2a1747, + 0x3015c3, + 0x338345, + 0x20e1c5, + 0x3339cb, + 0x26b544, + 0x2d5f04, + 0x27af06, + 0x301947, + 0x397d8a, + 0x246c47, + 0x239747, + 0x280385, + 0x2043c5, + 0x2181c9, + 0x2c0406, + 0x246acd, + 0x359c85, + 0x302803, + 0x2102c3, + 0x30c385, + 0x352545, + 0x327b88, + 0x27de47, + 0x22c506, + 0x29cf86, + 0x229cc5, + 0x231dc7, + 0x207b47, + 0x203087, + 0x2c974a, + 0x24e448, + 0x333b84, + 0x383fc7, + 0x27f347, + 0x332046, + 0x269307, + 0x2b2288, + 0x361d08, + 0x26fd46, + 0x3450c8, + 0x239544, + 0x33f686, + 0x39aa46, + 0x375986, + 0x30cd86, + 0x22ef84, + 0x327e86, + 0x2b8c06, + 0x295b86, + 0x233bc6, + 0x210186, + 0x2aeec6, + 0x22c408, + 0x320508, + 0x2ca248, + 0x266748, + 0x385b06, + 0x213585, + 0x27cb06, + 0x2ac645, + 0x38a187, + 0x231605, + 0x215a43, + 0x207645, + 0x22cd44, + 0x2102c5, + 0x21d8c3, + 0x2fd4c7, + 0x319c88, + 0x3051c6, + 0x2d600d, + 0x27c546, + 0x295045, + 0x2bc283, + 0x2b50c9, + 0x2bf646, + 0x295646, + 0x29ec04, + 0x246347, + 0x233246, + 0x384205, + 0x233b83, + 0x203f04, + 0x27f506, + 0x2b0944, + 0x30eb08, + 
0x396f89, + 0x32a849, + 0x29ea0a, + 0x310d8d, + 0x32fe07, + 0x380a86, + 0x2124c4, + 0x215009, + 0x2877c8, + 0x288cc6, + 0x267d06, + 0x269307, + 0x2c2806, + 0x225546, + 0x3a3606, + 0x313d0a, + 0x21f7c8, + 0x33aa05, + 0x282d09, + 0x283cca, + 0x2d6388, + 0x29abc8, + 0x2955c8, + 0x207f4c, + 0x2e8945, + 0x29d208, + 0x30a1c6, + 0x2d5646, + 0x379787, + 0x246b45, + 0x285785, + 0x32a709, + 0x214c87, + 0x2b2745, + 0x229f87, + 0x2102c3, + 0x2beb45, + 0x3aa1c8, + 0x2d4047, + 0x29aa89, + 0x2d9545, + 0x3074c4, + 0x2a0288, + 0x20e747, + 0x2a1908, + 0x34cdc8, + 0x35e9c5, + 0x23c806, + 0x252586, + 0x2e5e89, + 0x313687, + 0x2aca46, + 0x20b247, + 0x215403, + 0x258804, + 0x29c8c5, + 0x2589c4, + 0x360104, + 0x283587, + 0x209507, + 0x22f744, + 0x29a8d0, + 0x326fc7, + 0x2043c5, + 0x2e95cc, + 0x2b6084, + 0x2c6588, + 0x23e409, + 0x302086, + 0x33f488, + 0x240544, + 0x240548, + 0x384946, + 0x32d148, + 0x29c5c6, + 0x2c84cb, + 0x204a85, + 0x2c4308, + 0x21a084, + 0x284c8a, + 0x29aa89, + 0x2e0286, + 0x2d8b48, + 0x257a45, + 0x2fdac4, + 0x2c6486, + 0x202f48, + 0x283308, + 0x349546, + 0x37ff84, + 0x30b1c6, + 0x383c87, + 0x27bb47, + 0x26930f, + 0x208607, + 0x2f3ac7, + 0x2d5505, + 0x2efcc5, + 0x29f009, + 0x272346, + 0x281f05, + 0x285307, + 0x2d8e08, + 0x295c85, + 0x297706, + 0x21a3c8, + 0x2067ca, + 0x215448, + 0x3acd07, + 0x2371c6, + 0x282cc6, + 0x20e003, + 0x20fa43, + 0x283e89, + 0x28db09, + 0x2c6386, + 0x2d9545, + 0x2a7008, + 0x2d8b48, + 0x2ba5c8, + 0x3a368b, + 0x2d6247, + 0x2ff489, + 0x269588, + 0x33c444, + 0x2c48c8, + 0x28c489, + 0x2acd45, + 0x2d2787, + 0x2f7805, + 0x283208, + 0x28ef0b, + 0x293a90, + 0x2a6c45, + 0x219fcc, + 0x22c6c5, + 0x207203, + 0x2a7d46, + 0x2b7204, + 0x3397c6, + 0x29b587, + 0x215444, + 0x2422c8, + 0x322bcd, + 0x2d8a05, + 0x298f84, + 0x218404, + 0x282789, + 0x2a4e08, + 0x30bdc7, + 0x3849c8, + 0x284188, + 0x27d705, + 0x342607, + 0x27d687, + 0x2e2f87, + 0x2bdb49, + 0x2330c9, + 0x23fc46, + 0x2b3246, + 0x269546, + 0x26c105, + 0x3b0044, + 0x201b06, + 0x203c86, + 0x27d748, + 0x263b8b, + 0x26b987, + 0x2124c4, + 0x315c46, + 0x207047, + 0x2aeac5, + 0x316dc5, + 0x20f484, + 0x233046, + 0x201b88, + 0x215009, + 0x248446, + 0x287148, + 0x3842c6, + 0x332908, + 0x3ae14c, + 0x27d5c6, + 0x294d0d, + 0x29518b, + 0x248605, + 0x207c87, + 0x2395c6, + 0x330988, + 0x23fcc9, + 0x2e5ac8, + 0x2043c5, + 0x2f0807, + 0x281908, + 0x366909, + 0x23c0c6, + 0x24834a, + 0x330708, + 0x2e590b, + 0x2c6dcc, + 0x240648, + 0x27ec46, + 0x342008, + 0x208747, + 0x233349, + 0x29148d, + 0x29ba46, + 0x3a6cc8, + 0x3203c9, + 0x2b5ac8, + 0x286248, + 0x2b94cc, + 0x2ba7c7, + 0x2bb3c7, + 0x2bd985, + 0x2ee287, + 0x2d8cc8, + 0x2c6506, + 0x256acc, + 0x2e6808, + 0x2c5808, + 0x266a06, + 0x20df47, + 0x23fe44, + 0x266748, + 0x2dc68c, + 0x21c68c, + 0x225e85, + 0x393e07, + 0x37ff06, + 0x20dec6, + 0x29b048, + 0x3a4984, + 0x32c64b, + 0x2263cb, + 0x2371c6, + 0x322a47, + 0x328b05, + 0x273145, + 0x32c786, + 0x257a05, + 0x26b505, + 0x379a47, + 0x27b509, + 0x2344c4, + 0x3621c5, + 0x2d71c5, + 0x25b748, + 0x376005, + 0x2a7849, + 0x370587, + 0x37058b, + 0x2d1546, + 0x22c149, + 0x24f948, + 0x280d45, + 0x2e3088, + 0x233108, + 0x211807, + 0x282b87, + 0x283609, + 0x320447, + 0x38cf09, + 0x2aa34c, + 0x36aa08, + 0x2b5ec9, + 0x2b8247, + 0x284249, + 0x209647, + 0x2c6ec8, + 0x25ba85, + 0x33f606, + 0x2b9cc8, + 0x2d6a88, + 0x283b89, + 0x26b547, + 0x273205, + 0x216c49, + 0x28e046, + 0x28e7c4, + 0x2e5786, + 0x2450c8, + 0x248e07, + 0x263d88, + 0x345189, + 0x364f87, + 0x29c346, + 0x207d44, + 0x2076c9, + 0x342488, + 0x2668c7, + 0x323e06, + 0x20e286, + 0x380b84, + 0x326186, + 0x210243, + 
0x2cf889, + 0x204a46, + 0x2a4485, + 0x29cf86, + 0x2a1a45, + 0x281d88, + 0x240387, + 0x35b546, + 0x3414c6, + 0x35e248, + 0x29f187, + 0x29ba85, + 0x29d488, + 0x38cc48, + 0x330708, + 0x22c585, + 0x33f686, + 0x32a609, + 0x252404, + 0x373acb, + 0x22524b, + 0x33a909, + 0x2102c3, + 0x2546c5, + 0x20ff46, + 0x267608, + 0x236d04, + 0x3051c6, + 0x2c9889, + 0x2c6845, + 0x379986, + 0x20e746, + 0x211184, + 0x2a064a, + 0x2a43c8, + 0x2d6a86, + 0x329185, + 0x20d707, + 0x378e07, + 0x23c804, + 0x225487, + 0x2315c4, + 0x2315c6, + 0x21a503, + 0x2bdb45, + 0x36fb05, + 0x20eac8, + 0x258905, + 0x27d309, + 0x266587, + 0x26658b, + 0x2a12cc, + 0x2a1eca, + 0x2bf307, + 0x202003, + 0x2e6408, + 0x22c745, + 0x295d05, + 0x338404, + 0x2c6dc6, + 0x23e406, + 0x3261c7, + 0x39998b, + 0x22ef84, + 0x382744, + 0x26fec4, + 0x2c4046, + 0x215444, + 0x211008, + 0x338205, + 0x29d545, + 0x2ba507, + 0x207d89, + 0x352545, + 0x31abca, + 0x2d8f49, + 0x2a104a, + 0x313e49, + 0x39cc04, + 0x2cd385, + 0x2c2908, + 0x2fc70b, + 0x2fb3c5, + 0x237546, + 0x214904, + 0x27d846, + 0x364e09, + 0x315d07, + 0x30c108, + 0x311106, + 0x383c07, + 0x283308, + 0x38fbc6, + 0x244a44, + 0x3636c7, + 0x34a905, + 0x350a47, + 0x201c04, + 0x239546, + 0x21fa48, + 0x295348, + 0x2ee007, + 0x378288, + 0x2b34c5, + 0x210104, + 0x3410c8, + 0x3321c4, + 0x211805, + 0x2efe04, + 0x2e4a47, + 0x289dc7, + 0x284388, + 0x2a1a86, + 0x258885, + 0x27d108, + 0x215648, + 0x29e949, + 0x225546, + 0x22fa08, + 0x284b0a, + 0x2aeb48, + 0x2d8045, + 0x27cd06, + 0x2740c8, + 0x2f08ca, + 0x24fb47, + 0x287bc5, + 0x294288, + 0x2ad9c4, + 0x384446, + 0x2bbb48, + 0x210186, + 0x264308, + 0x252247, + 0x205b86, + 0x37ac04, + 0x37e907, + 0x2fd904, + 0x364dc7, + 0x33924d, + 0x288b05, + 0x2d3e4b, + 0x29c6c6, + 0x24d788, + 0x242284, + 0x278906, + 0x27f506, + 0x342347, + 0x2949cd, + 0x2ab007, + 0x302748, + 0x24c9c5, + 0x288248, + 0x2be186, + 0x2b3548, + 0x217086, + 0x3448c7, + 0x345349, + 0x33ff07, + 0x288f88, + 0x276005, + 0x21f108, + 0x20de05, + 0x242b45, + 0x35a545, + 0x226103, + 0x285684, + 0x282d05, + 0x271249, + 0x2ffa06, + 0x2b2388, + 0x24bc05, + 0x32e087, + 0x24ba0a, + 0x3798c9, + 0x2a830a, + 0x2ca2c8, + 0x229dcc, + 0x28538d, + 0x334603, + 0x264208, + 0x203ec5, + 0x206586, + 0x382346, + 0x2d7b45, + 0x20b349, + 0x264745, + 0x27d108, + 0x255ac6, + 0x33cac6, + 0x2a0149, + 0x38f3c7, + 0x28f1c6, + 0x24b988, + 0x375888, + 0x2d0547, + 0x32d2ce, + 0x2be3c5, + 0x366805, + 0x210088, + 0x3978c7, + 0x20ca82, + 0x2b9044, + 0x3396ca, + 0x266988, + 0x207146, + 0x296bc8, + 0x252586, + 0x323708, + 0x2aca48, + 0x242b04, + 0x333805, + 0x687e44, + 0x687e44, + 0x687e44, + 0x204b03, + 0x20e106, + 0x27d5c6, + 0x29bd0c, + 0x205243, + 0x283cc6, + 0x21a4c4, + 0x2bf5c8, + 0x2c96c5, + 0x3397c6, + 0x2b5808, + 0x2cb1c6, + 0x35b4c6, + 0x327988, + 0x29c947, + 0x32ce49, + 0x306f8a, + 0x264944, + 0x231605, + 0x2a91c5, + 0x214e06, + 0x32fe46, + 0x2a7406, + 0x2eeb86, + 0x32cf84, + 0x32cf8b, + 0x2313c4, + 0x20d785, + 0x2ab905, + 0x283086, + 0x3b0588, + 0x285247, + 0x30bec4, + 0x259b83, + 0x2ad4c5, + 0x2e5647, + 0x2a2849, + 0x28514b, + 0x3261c7, + 0x20e9c7, + 0x2b5708, + 0x32e1c7, + 0x2a2a86, + 0x244448, + 0x2a584b, + 0x399606, + 0x217549, + 0x2a59c5, + 0x3015c3, + 0x379986, + 0x252148, + 0x214ec3, + 0x21cf03, + 0x283306, + 0x252586, + 0x38c60a, + 0x27ec85, + 0x27f34b, + 0x29cecb, + 0x2417c3, + 0x21fe03, + 0x2ae304, + 0x344d07, + 0x240644, + 0x207f44, + 0x30a044, + 0x2aee48, + 0x3290c8, + 0x35ad89, + 0x38e788, + 0x271407, + 0x233bc6, + 0x2b1fcf, + 0x2be506, + 0x2c9644, + 0x328f0a, + 0x2e5547, + 0x201846, + 0x28e809, + 0x35ad05, + 0x20ec05, + 
0x35ae46, + 0x21f243, + 0x2ada09, + 0x21f946, + 0x344f49, + 0x397d86, + 0x2bdb45, + 0x226285, + 0x208603, + 0x344e48, + 0x3a5ac7, + 0x2f7204, + 0x2bf448, + 0x2c0184, + 0x2c5686, + 0x2a7d46, + 0x23ec06, + 0x2c41c9, + 0x295c85, + 0x2c0406, + 0x2697c9, + 0x3ad4c6, + 0x2aeec6, + 0x3895c6, + 0x215105, + 0x2efe06, + 0x3448c4, + 0x25ba85, + 0x2b9cc4, + 0x309f46, + 0x359c44, + 0x202c43, + 0x287885, + 0x232e08, + 0x22b547, + 0x2b3dc9, + 0x287ac8, + 0x296391, + 0x20e7ca, + 0x237107, + 0x2bc046, + 0x21a4c4, + 0x2b9dc8, + 0x22f488, + 0x29654a, + 0x2a760d, + 0x237446, + 0x327a86, + 0x37e9c6, + 0x2ef0c7, + 0x302805, + 0x261107, + 0x2bf505, + 0x3706c4, + 0x2a5286, + 0x326007, + 0x2ad70d, + 0x274007, + 0x270188, + 0x27d409, + 0x27cc06, + 0x23c045, + 0x21d904, + 0x2451c6, + 0x23c706, + 0x266b06, + 0x299108, + 0x215d03, + 0x210603, + 0x323ac5, + 0x376b86, + 0x2aca05, + 0x311308, + 0x29b74a, + 0x2ce804, + 0x2bf5c8, + 0x2955c8, + 0x282ec7, + 0x24bcc9, + 0x2b5408, + 0x215087, + 0x269ec6, + 0x21018a, + 0x245248, + 0x2c6c09, + 0x2a4ec8, + 0x221649, + 0x2e5bc7, + 0x349945, + 0x361f86, + 0x2c6388, + 0x24d908, + 0x28eb48, + 0x21acc8, + 0x20d785, + 0x200884, + 0x3a57c8, + 0x201944, + 0x313c44, + 0x2bdb45, + 0x290487, + 0x207b49, + 0x342147, + 0x2051c5, + 0x27b106, + 0x33f0c6, + 0x206944, + 0x2a0486, + 0x383f44, + 0x288146, + 0x3a4a46, + 0x219206, + 0x2043c5, + 0x3111c7, + 0x202003, + 0x3670c9, + 0x35e048, + 0x214f04, + 0x214f0d, + 0x295448, + 0x2f3f48, + 0x2c6b86, + 0x345449, + 0x3798c9, + 0x364b05, + 0x29b84a, + 0x28224a, + 0x289f8c, + 0x28a106, + 0x27b9c6, + 0x2bea86, + 0x26db09, + 0x2067c6, + 0x261146, + 0x264806, + 0x266748, + 0x215446, + 0x2c450b, + 0x290605, + 0x29d545, + 0x27bc45, + 0x202106, + 0x210143, + 0x23eb86, + 0x273f87, + 0x2b9c85, + 0x243645, + 0x3a0505, + 0x335f46, + 0x31abc4, + 0x372a46, + 0x299489, + 0x201f8c, + 0x370408, + 0x202ec4, + 0x2efbc6, + 0x29c7c6, + 0x252148, + 0x2d8b48, + 0x201e89, + 0x20d707, + 0x2563c9, + 0x24cf86, + 0x22cf44, + 0x20f184, + 0x288bc4, + 0x283308, + 0x20798a, + 0x3524c6, + 0x356387, + 0x236087, + 0x22c245, + 0x2a9184, + 0x28c446, + 0x302846, + 0x233303, + 0x35de87, + 0x34ccc8, + 0x364c4a, + 0x2cbb88, + 0x2ce688, + 0x359c85, + 0x248705, + 0x26ba85, + 0x22c606, + 0x37ed86, + 0x209445, + 0x2cfac9, + 0x2a8f8c, + 0x26bb47, + 0x2965c8, + 0x257d45, + 0x687e44, + 0x247884, + 0x2d4184, + 0x2c1606, + 0x29da4e, + 0x20ec87, + 0x2edfc5, + 0x25238c, + 0x2c0047, + 0x325f87, + 0x35bf49, + 0x21b949, + 0x287bc5, + 0x35e048, + 0x32a609, + 0x2f2ec5, + 0x2b9bc8, + 0x2c4ac6, + 0x341346, + 0x331b84, + 0x2a46c8, + 0x249e83, + 0x342c84, + 0x2ad545, + 0x335407, + 0x20f4c5, + 0x2849c9, + 0x28d60d, + 0x2a6246, + 0x32c004, + 0x211188, + 0x27b34a, + 0x20cb87, + 0x239d85, + 0x206d03, + 0x29d08e, + 0x25258c, + 0x2fb707, + 0x29dc07, + 0x201c43, + 0x206805, + 0x2d4185, + 0x296f88, + 0x2940c9, + 0x202dc6, + 0x240644, + 0x237046, + 0x37564b, + 0x3a0b8c, + 0x341ec7, + 0x2c9385, + 0x38cb48, + 0x2d0305, + 0x328f07, + 0x23bf07, + 0x249e85, + 0x210143, + 0x2af184, + 0x20f945, + 0x2ad0c5, + 0x2ad0c6, + 0x2927c8, + 0x326007, + 0x382646, + 0x206486, + 0x35a486, + 0x263a09, + 0x342707, + 0x202c46, + 0x3a0d06, + 0x249f46, + 0x2a6f45, + 0x20aa06, + 0x399385, + 0x376088, + 0x2936cb, + 0x28c246, + 0x2360c4, + 0x2f0689, + 0x266584, + 0x2c4a48, + 0x2961c7, + 0x286144, + 0x2b4708, + 0x2bad84, + 0x2a6f84, + 0x2889c5, + 0x2d8a46, + 0x2aed87, + 0x2643c3, + 0x29c405, + 0x2f7784, + 0x366846, + 0x364b88, + 0x327885, + 0x28ff09, + 0x216e45, + 0x2e2b48, + 0x263747, + 0x38a2c8, + 0x2b3c07, + 0x2f3b89, + 0x327d06, + 0x36bd06, + 
0x264804, + 0x269e05, + 0x2f5e4c, + 0x27bc47, + 0x27c447, + 0x235f48, + 0x2a6246, + 0x273ec4, + 0x2ead84, + 0x283489, + 0x2beb86, + 0x218247, + 0x30cd04, + 0x2ffb06, + 0x325b85, + 0x2a15c7, + 0x2c4486, + 0x248209, + 0x282087, + 0x269307, + 0x29ffc6, + 0x310c85, + 0x280a08, + 0x21f7c8, + 0x23d906, + 0x3278c5, + 0x261c86, + 0x205d03, + 0x296e09, + 0x2a718e, + 0x2b2a08, + 0x2c0288, + 0x23d70b, + 0x290146, + 0x321ac4, + 0x284f84, + 0x2a728a, + 0x219ec7, + 0x202d05, + 0x217549, + 0x2b8cc5, + 0x313c87, + 0x301e84, + 0x397107, + 0x2372c8, + 0x2d1bc6, + 0x3a6e49, + 0x2b550a, + 0x219e46, + 0x294f86, + 0x2ab885, + 0x37cb05, + 0x357f47, + 0x2456c8, + 0x325ac8, + 0x242b06, + 0x226305, + 0x32fbce, + 0x333b84, + 0x23d885, + 0x27aa89, + 0x272148, + 0x3acc46, + 0x298d8c, + 0x29b350, + 0x29d68f, + 0x29ef08, + 0x2bf307, + 0x2043c5, + 0x282d05, + 0x2aec09, + 0x294489, + 0x30b2c6, + 0x2fb447, + 0x393d85, + 0x36d749, + 0x3320c6, + 0x20660d, + 0x288a89, + 0x207f44, + 0x2b2788, + 0x3a5889, + 0x352686, + 0x27b205, + 0x36bd06, + 0x30bfc9, + 0x2381c8, + 0x213585, + 0x284c04, + 0x298f4b, + 0x352545, + 0x267686, + 0x2856c6, + 0x26b006, + 0x3a388b, + 0x290009, + 0x209785, + 0x38a087, + 0x20e746, + 0x339506, + 0x284888, + 0x269fc9, + 0x26ff4c, + 0x2e5448, + 0x352786, + 0x349543, + 0x2d2986, + 0x2829c5, + 0x27f688, + 0x225d06, + 0x2a1808, + 0x246cc5, + 0x215185, + 0x2a0848, + 0x378f47, + 0x382287, + 0x3261c7, + 0x33f488, + 0x28e9c8, + 0x24de06, + 0x309d87, + 0x2586c7, + 0x28288a, + 0x24ce83, + 0x202106, + 0x203005, + 0x324244, + 0x27d409, + 0x2f3b04, + 0x22b5c4, + 0x29c644, + 0x29dc0b, + 0x3a5a07, + 0x32fe05, + 0x293548, + 0x27b106, + 0x27b108, + 0x27ebc6, + 0x28adc5, + 0x28b545, + 0x28d046, + 0x28e488, + 0x28e748, + 0x27d5c6, + 0x29338f, + 0x2968d0, + 0x3a26c5, + 0x202003, + 0x24c905, + 0x2ff3c8, + 0x294389, + 0x330708, + 0x263888, + 0x380648, + 0x3a5ac7, + 0x27adc9, + 0x2a1a08, + 0x2b0684, + 0x29c4c8, + 0x25b809, + 0x30b8c7, + 0x298144, + 0x342208, + 0x310f8a, + 0x2c3ec6, + 0x237446, + 0x225409, + 0x29b587, + 0x2c4e48, + 0x209fc8, + 0x30cb88, + 0x355ec5, + 0x37da85, + 0x29d545, + 0x2d4145, + 0x2f1807, + 0x210145, + 0x2b9c85, + 0x212ec6, + 0x330647, + 0x2fc647, + 0x311286, + 0x2ca805, + 0x267686, + 0x240405, + 0x2bfec8, + 0x2ff984, + 0x3ad546, + 0x2e7dc4, + 0x2fdac8, + 0x3ad64a, + 0x27de4c, + 0x399b85, + 0x2ef186, + 0x270106, + 0x34c5c6, + 0x2ff5c4, + 0x325e45, + 0x27ea07, + 0x29b609, + 0x2a2947, + 0x687e44, + 0x687e44, + 0x30bd45, + 0x229144, + 0x29874a, + 0x27af86, + 0x2e5884, + 0x2017c5, + 0x2eb445, + 0x302744, + 0x285307, + 0x216dc7, + 0x2c4048, + 0x317048, + 0x213589, + 0x3321c8, + 0x29890b, + 0x214e04, + 0x361905, + 0x281f85, + 0x326149, + 0x269fc9, + 0x2f0588, + 0x2313c8, + 0x283084, + 0x29c805, + 0x207503, + 0x214dc5, + 0x2c0486, + 0x293f0c, + 0x21f846, + 0x240446, + 0x2940c5, + 0x335fc8, + 0x3a0e06, + 0x2bc1c6, + 0x237446, + 0x22d6cc, + 0x266cc4, + 0x35a5ca, + 0x3ace08, + 0x293d47, + 0x244946, + 0x202e87, + 0x2e0845, + 0x323e06, + 0x354906, + 0x382147, + 0x22b604, + 0x2e4b45, + 0x27aa84, + 0x370747, + 0x27acc8, + 0x27b84a, + 0x281787, + 0x23da87, + 0x2bf287, + 0x2d0449, + 0x293f0a, + 0x22cf03, + 0x22b505, + 0x219243, + 0x30a089, + 0x2f1108, + 0x2d5507, + 0x330809, + 0x21f8c6, + 0x2b0208, + 0x2fd445, + 0x21574a, + 0x326dc9, + 0x26fc09, + 0x379787, + 0x22f589, + 0x219108, + 0x2eff86, + 0x2ef348, + 0x215e47, + 0x320447, + 0x2d8f47, + 0x2d5388, + 0x2efa46, + 0x310d45, + 0x27ea07, + 0x294a88, + 0x35a404, + 0x350f04, + 0x28f0c7, + 0x2acdc7, + 0x32a48a, + 0x2eff06, + 0x2fb58a, + 0x2b8f87, + 0x333947, + 0x242c04, + 
0x38cfc4, + 0x227a06, + 0x264d04, + 0x264d0c, + 0x3b1f45, + 0x21ab09, + 0x2e2cc4, + 0x302805, + 0x27b2c8, + 0x28e805, + 0x31abc6, + 0x2115c4, + 0x2a0a8a, + 0x2b0846, + 0x29574a, + 0x31a987, + 0x263ec5, + 0x21f245, + 0x22c28a, + 0x2a0585, + 0x29ea06, + 0x201944, + 0x2ae486, + 0x358005, + 0x225dc6, + 0x2ee00c, + 0x2c4fca, + 0x269ec4, + 0x233bc6, + 0x29b587, + 0x2c8984, + 0x266748, + 0x38e606, + 0x32fa49, + 0x2c5ec9, + 0x36ab09, + 0x373c86, + 0x215f46, + 0x2ef487, + 0x2cfa08, + 0x215d49, + 0x3a5a07, + 0x2b3346, + 0x383c87, + 0x37e885, + 0x333b84, + 0x2ef047, + 0x2f7805, + 0x288905, + 0x300687, + 0x249d48, + 0x38cac6, + 0x2959cd, + 0x29718f, + 0x29cecd, + 0x205204, + 0x232f06, + 0x2cbec8, + 0x2647c5, + 0x282a48, + 0x2116ca, + 0x207f44, + 0x3a7006, + 0x39f8c7, + 0x22ef87, + 0x29ca09, + 0x2ef305, + 0x302744, + 0x33374a, + 0x2b4fc9, + 0x22f687, + 0x26cbc6, + 0x352686, + 0x29c746, + 0x363786, + 0x2cb84f, + 0x2cbd89, + 0x215446, + 0x22f386, + 0x27a409, + 0x309e87, + 0x21d943, + 0x22d846, + 0x20fa43, + 0x2d7a08, + 0x383ac7, + 0x29f109, + 0x2a7bc8, + 0x3823c8, + 0x26b686, + 0x23c549, + 0x2c7a85, + 0x244944, + 0x349a07, + 0x26db85, + 0x205204, + 0x32fec8, + 0x21a184, + 0x305647, + 0x319c06, + 0x29f785, + 0x2a4ec8, + 0x35254b, + 0x35e547, + 0x22c506, + 0x2be584, + 0x321a46, + 0x2bdb45, + 0x2f7805, + 0x280789, + 0x284f09, + 0x2a2a04, + 0x3204c5, + 0x233c05, + 0x2155c6, + 0x35e148, + 0x2b7586, + 0x34cb0b, + 0x301f0a, + 0x2fda05, + 0x28b5c6, + 0x2f6f05, + 0x209845, + 0x29ad47, + 0x2073c8, + 0x2563c4, + 0x364a06, + 0x28e7c6, + 0x2192c7, + 0x301584, + 0x27f506, + 0x300e05, + 0x300e09, + 0x216144, + 0x2a9309, + 0x27d5c6, + 0x2ba888, + 0x233c05, + 0x236185, + 0x225dc6, + 0x26fe49, + 0x21b949, + 0x2404c6, + 0x272248, + 0x252488, + 0x2f6ec4, + 0x363c84, + 0x363c88, + 0x3158c8, + 0x2564c9, + 0x2c0406, + 0x237446, + 0x312ccd, + 0x3051c6, + 0x3ae009, + 0x2022c5, + 0x35ae46, + 0x261248, + 0x30fc45, + 0x258704, + 0x2bdb45, + 0x284588, + 0x298509, + 0x27ab44, + 0x239546, + 0x2e5d0a, + 0x2d6388, + 0x32a609, + 0x35b64a, + 0x330786, + 0x297348, + 0x328cc5, + 0x326c48, + 0x2b3d05, + 0x21f789, + 0x368809, + 0x202e02, + 0x2a59c5, + 0x272e86, + 0x27d507, + 0x324245, + 0x2f9986, + 0x30fd08, + 0x2a6246, + 0x2c27c9, + 0x27c546, + 0x284708, + 0x2a8645, + 0x24ae86, + 0x3449c8, + 0x283308, + 0x3a4ac8, + 0x2febc8, + 0x20aa04, + 0x22a783, + 0x2c2a04, + 0x236fc6, + 0x37e8c4, + 0x2c01c7, + 0x2bc0c9, + 0x2bdd85, + 0x209fc6, + 0x22d846, + 0x29260b, + 0x2fd946, + 0x316406, + 0x2c2f88, + 0x243586, + 0x263cc3, + 0x20a383, + 0x333b84, + 0x22f905, + 0x384107, + 0x27acc8, + 0x27accf, + 0x27e90b, + 0x35df48, + 0x2395c6, + 0x35e24e, + 0x225dc3, + 0x2b1d84, + 0x2fd8c5, + 0x33d746, + 0x28c54b, + 0x290546, + 0x21a449, + 0x29f785, + 0x38b708, + 0x212088, + 0x21b80c, + 0x29dc46, + 0x214e06, + 0x2d9545, + 0x288d48, + 0x27de45, + 0x33c448, + 0x29d30a, + 0x361e09, + 0x687e44, + 0x2f606a82, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x327883, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x2023c4, + 0x24c083, + 0x204703, + 0x223503, + 0x223504, + 0x22bf83, + 0x234a44, + 0x231b03, + 0x2c8144, + 0x250cc3, + 0x324507, + 0x220ec3, + 0x2020c3, + 0x255bc8, + 0x204703, + 0x2d2d8b, + 0x2e0f83, + 0x25b1c6, + 0x2012c2, + 0x387d0b, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x204703, + 0x29dfc3, + 0x205a03, + 0x200882, + 0x77a48, + 0x343145, + 0x2d4e88, + 0x2da008, + 0x206a82, + 0x330f85, + 0x357bc7, + 0x200202, + 0x2424c7, + 0x20f582, + 0x23d4c7, + 0x265249, + 0x3167c8, + 0x30ca09, + 
0x3345c2, + 0x26ab07, + 0x240244, + 0x357c87, + 0x301e07, + 0x244d02, + 0x220ec3, + 0x203642, + 0x202b42, + 0x200fc2, + 0x200ac2, + 0x203942, + 0x203682, + 0x2a81c5, + 0x2477c5, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x481, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x2023c4, + 0x202243, + 0x24c083, + 0x204703, + 0x20b743, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0xa9c2, + 0x77a48, + 0x45684, + 0xd0705, + 0x200882, + 0x2bb844, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x37f383, + 0x2a8e05, + 0x202243, + 0x39a883, + 0x24c083, + 0x20f543, + 0x204703, + 0x20b803, + 0x223583, + 0x2232c3, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x206a82, + 0x204703, + 0x77a48, + 0x250cc3, + 0x77a48, + 0x2cd683, + 0x22bf83, + 0x22ff84, + 0x231b03, + 0x250cc3, + 0x20b542, + 0x220ec3, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x20b542, + 0x232003, + 0x24c083, + 0x204703, + 0x2d9f83, + 0x20b803, + 0x200882, + 0x206a82, + 0x250cc3, + 0x24c083, + 0x204703, + 0x25b1c5, + 0xad186, + 0x223504, + 0x2012c2, + 0x77a48, + 0x200882, + 0x20048, + 0x206a82, + 0xf206, + 0x143f44, + 0x10844b, + 0x18986, + 0x142b87, + 0x231b03, + 0x250cc3, + 0x159b85, + 0x14d4c4, + 0x24dd43, + 0x4ce47, + 0xcd204, + 0x24c083, + 0x14c104, + 0x204703, + 0x2e1c44, + 0x10c888, + 0x122706, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x2020c3, + 0x204703, + 0x2e0f83, + 0x2012c2, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x2023c3, + 0x211004, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 0x2c8144, + 0x250cc3, + 0x24c083, + 0x204703, + 0x25b1c6, + 0x231b03, + 0x250cc3, + 0x178d03, + 0x204703, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x142b87, + 0x77a48, + 0x250cc3, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x3822bf83, + 0x231b03, + 0x24c083, + 0x204703, + 0x77a48, + 0x200882, + 0x206a82, + 0x22bf83, + 0x250cc3, + 0x24c083, + 0x200fc2, + 0x204703, + 0x30abc7, + 0x2e330b, + 0x208883, + 0x23a8c8, + 0x2cf787, + 0x2b7b46, + 0x2bc885, + 0x2f9509, + 0x25af48, + 0x31b349, + 0x31b350, + 0x35d24b, + 0x2ec7c9, + 0x206103, + 0x2130c9, + 0x230a86, + 0x230a8c, + 0x31b548, + 0x3ad308, + 0x279709, + 0x29e40e, + 0x37b34b, + 0x237bcc, + 0x201e03, + 0x268d0c, + 0x208349, + 0x375287, + 0x231a4c, + 0x39c18a, + 0x247204, + 0x3a4d8d, + 0x268bc8, + 0x3a52cd, + 0x26e086, + 0x290ccb, + 0x375b49, + 0x3162c7, + 0x31eb06, + 0x321c09, + 0x34b9ca, + 0x304f48, + 0x2e0b84, + 0x362087, + 0x278a07, + 0x30cf04, + 0x228dc4, + 0x262f09, + 0x2ce4c9, + 0x323148, + 0x212905, + 0x392845, + 0x20d546, + 0x3a4c49, + 0x21194d, + 0x237648, + 0x20d447, + 0x2bc908, + 0x32cc06, + 0x3a2444, + 0x37dd45, + 0x204946, + 0x205884, + 0x208247, + 0x20a60a, + 0x214bc4, + 0x219d86, + 0x21a789, + 0x21a78f, + 0x21b50d, + 0x21ca86, + 0x21fc50, + 0x220046, + 0x220787, + 0x221047, + 0x22104f, + 0x221889, + 0x2256c6, + 0x227e87, + 0x227e88, + 0x228249, + 0x290248, + 0x2d7547, + 0x20ce83, + 0x386a86, + 0x2e4c88, + 0x29e6ca, + 0x214689, + 0x212383, + 0x357ac6, + 0x36484a, + 0x2f7d07, + 0x3750ca, + 0x2028ce, + 0x2219c6, + 0x2a5bc7, + 0x217306, + 0x208406, + 0x37d88b, + 0x3038ca, + 0x22704d, + 0x216007, + 0x264988, + 0x264989, + 0x26498f, + 0x20e34c, + 0x27f909, + 0x3af74e, + 0x32460a, + 0x329546, + 0x37a686, + 0x306ccc, + 0x3108cc, + 0x32adc8, + 0x33fe07, + 0x2b0585, + 0x206a84, + 0x345b4e, + 0x34bc44, + 0x22adc7, + 0x265e4a, + 0x382ad4, 
+ 0x384e4f, + 0x221208, + 0x386948, + 0x36d10d, + 0x36d10e, + 0x390509, + 0x22e108, + 0x22e10f, + 0x23174c, + 0x23174f, + 0x232c47, + 0x2352ca, + 0x21f50b, + 0x239c08, + 0x23aac7, + 0x25a64d, + 0x35c546, + 0x3a4f46, + 0x23ea09, + 0x250248, + 0x243048, + 0x24304e, + 0x2e3407, + 0x2aabc5, + 0x244ec5, + 0x200e84, + 0x2b7e06, + 0x323048, + 0x25ff83, + 0x2de30e, + 0x25aa08, + 0x308dcb, + 0x367487, + 0x3a4485, + 0x22f246, + 0x2a9b47, + 0x2e8448, + 0x330489, + 0x35c7c5, + 0x2878c8, + 0x214006, + 0x37e50a, + 0x345a49, + 0x231b09, + 0x231b0b, + 0x31f608, + 0x30cdc9, + 0x2129c6, + 0x35a8ca, + 0x2b898a, + 0x2354cc, + 0x35c307, + 0x291e8a, + 0x2af68b, + 0x2af699, + 0x2dd788, + 0x25b245, + 0x25a806, + 0x2dad89, + 0x316cc6, + 0x211e4a, + 0x342a06, + 0x2143c4, + 0x2c098d, + 0x24fd07, + 0x2143c9, + 0x245e85, + 0x245fc8, + 0x2467c9, + 0x246a04, + 0x247107, + 0x247108, + 0x2479c7, + 0x2687c8, + 0x24d447, + 0x23c285, + 0x25520c, + 0x2558c9, + 0x36bf0a, + 0x38f249, + 0x2131c9, + 0x27450c, + 0x259a4b, + 0x259d08, + 0x25bf88, + 0x25f344, + 0x285e08, + 0x286bc9, + 0x39c247, + 0x21a9c6, + 0x2413c7, + 0x3af509, + 0x2af2cb, + 0x325947, + 0x204787, + 0x2e2d47, + 0x3a5244, + 0x3a5245, + 0x2a6d05, + 0x337a0b, + 0x398604, + 0x317e88, + 0x2aa7ca, + 0x2140c7, + 0x34a107, + 0x28bdd2, + 0x288046, + 0x22fb86, + 0x32728e, + 0x34ae06, + 0x291308, + 0x29210f, + 0x3a5688, + 0x377c48, + 0x2b4b8a, + 0x2b4b91, + 0x2a0d0e, + 0x23adca, + 0x23adcc, + 0x22e307, + 0x22e310, + 0x203d08, + 0x2a0f05, + 0x2aa10a, + 0x2058cc, + 0x2b368d, + 0x2abe06, + 0x2abe07, + 0x2abe0c, + 0x2f338c, + 0x2d988c, + 0x28f4cb, + 0x287284, + 0x225584, + 0x371389, + 0x2d8447, + 0x32c449, + 0x2b87c9, + 0x367f07, + 0x39c006, + 0x39c009, + 0x3a6b43, + 0x2a634a, + 0x206cc7, + 0x3624cb, + 0x226eca, + 0x23d604, + 0x3564c6, + 0x281a09, + 0x20b504, + 0x3b200a, + 0x2f9b45, + 0x2b6285, + 0x2b628d, + 0x2b65ce, + 0x3146c5, + 0x3942c6, + 0x25adc7, + 0x2dc04a, + 0x2e8646, + 0x2fc484, + 0x2f5987, + 0x220d8b, + 0x32ccc7, + 0x3a34c4, + 0x374206, + 0x37420d, + 0x23918c, + 0x380406, + 0x23784a, + 0x217b06, + 0x21da08, + 0x228507, + 0x37f14a, + 0x2310c6, + 0x215f03, + 0x2613c6, + 0x201308, + 0x298b0a, + 0x26c447, + 0x26c448, + 0x2732c4, + 0x2863c7, + 0x28e0c8, + 0x2151c8, + 0x285c08, + 0x32630a, + 0x2cff85, + 0x2c76c7, + 0x23ac13, + 0x22c006, + 0x2b09c8, + 0x223ac9, + 0x242388, + 0x26b70b, + 0x2b8d88, + 0x220ec4, + 0x2a0946, + 0x3b11c6, + 0x2d8889, + 0x387747, + 0x255308, + 0x3acf86, + 0x21c444, + 0x2c4d05, + 0x2bf0c8, + 0x2bf98a, + 0x2c0608, + 0x2c5446, + 0x29920a, + 0x234548, + 0x2c8788, + 0x2c9bc8, + 0x2ca4c6, + 0x2cc0c6, + 0x31f00c, + 0x2cc590, + 0x28a505, + 0x2f9f88, + 0x2f9f90, + 0x3a5490, + 0x31b1ce, + 0x31ec8e, + 0x31ec94, + 0x31f7cf, + 0x31fb86, + 0x250751, + 0x3086d3, + 0x308b48, + 0x321585, + 0x359ec8, + 0x20f3c5, + 0x22964c, + 0x256789, + 0x22ac09, + 0x241147, + 0x214989, + 0x24ff47, + 0x2b6106, + 0x37db47, + 0x2605c5, + 0x310b83, + 0x260149, + 0x227409, + 0x378d03, + 0x3abb84, + 0x38004d, + 0x3810cf, + 0x3005c5, + 0x3188c6, + 0x20d147, + 0x303d07, + 0x289946, + 0x28994b, + 0x2a2085, + 0x257ec6, + 0x209247, + 0x273949, + 0x3358c6, + 0x210805, + 0x22400b, + 0x37f406, + 0x249885, + 0x39f588, + 0x2b5cc8, + 0x2b6b4c, + 0x2b6b50, + 0x2ca9c9, + 0x2f8587, + 0x2de9cb, + 0x2d59c6, + 0x2d740a, + 0x2d860b, + 0x2d918a, + 0x2d9406, + 0x2d9e45, + 0x2cf686, + 0x27c708, + 0x24120a, + 0x36cd9c, + 0x2e104c, + 0x2e1348, + 0x25b1c5, + 0x2e51c7, + 0x29e046, + 0x399445, + 0x21ea46, + 0x289b08, + 0x2b5247, + 0x29e308, + 0x2a5cca, + 0x321f8c, + 0x322209, + 0x20a147, + 0x20c544, + 0x245846, + 
0x3777ca, + 0x2b88c5, + 0x3a2c8c, + 0x3a5088, + 0x350b48, + 0x20da4c, + 0x213c4c, + 0x2162c9, + 0x216507, + 0x2c7e0c, + 0x32f644, + 0x39080a, + 0x20b80c, + 0x274d8b, + 0x23a28b, + 0x23b346, + 0x23df07, + 0x22e547, + 0x22e54f, + 0x2f4311, + 0x3b1ad2, + 0x23eecd, + 0x23eece, + 0x23f20e, + 0x31f988, + 0x31f992, + 0x242e48, + 0x2fc287, + 0x24a88a, + 0x20fd88, + 0x34adc5, + 0x2f164a, + 0x220587, + 0x2e6e04, + 0x24ddc3, + 0x376a45, + 0x2b4e07, + 0x2fa347, + 0x2b388e, + 0x38708d, + 0x39b189, + 0x216845, + 0x2ea903, + 0x25f8c6, + 0x36ba85, + 0x309008, + 0x2eb049, + 0x25a845, + 0x25a84f, + 0x2d9c87, + 0x2f9445, + 0x271d8a, + 0x3a2986, + 0x21db89, + 0x2ec3cc, + 0x2ee449, + 0x203f46, + 0x2aa5cc, + 0x2eea06, + 0x2f19c8, + 0x2f1bc6, + 0x2dd906, + 0x24fa84, + 0x25a083, + 0x35efca, + 0x31e391, + 0x27faca, + 0x327f85, + 0x38ec47, + 0x251b47, + 0x28e1c4, + 0x28e1cb, + 0x316648, + 0x2b2886, + 0x235fc5, + 0x265904, + 0x269ac9, + 0x27a984, + 0x3041c7, + 0x2ee645, + 0x2ee647, + 0x3274c5, + 0x2a8283, + 0x2fc148, + 0x325c0a, + 0x2643c3, + 0x34318a, + 0x274386, + 0x25a5cf, + 0x358f89, + 0x2de290, + 0x2e1848, + 0x2c5909, + 0x298347, + 0x37418f, + 0x330bc4, + 0x2c81c4, + 0x21b386, + 0x275746, + 0x2ff74a, + 0x32b746, + 0x33e787, + 0x2f8b48, + 0x2f8d47, + 0x2f9747, + 0x34da8a, + 0x2fbf8b, + 0x238845, + 0x3b1708, + 0x22aec3, + 0x36524c, + 0x38d68f, + 0x2b038d, + 0x2ef707, + 0x39b2c9, + 0x22cb47, + 0x240008, + 0x382ccc, + 0x272988, + 0x2511c8, + 0x30d64e, + 0x31d014, + 0x31d524, + 0x33ee8a, + 0x35d98b, + 0x250004, + 0x250009, + 0x3a7088, + 0x245a05, + 0x25fa8a, + 0x265107, + 0x2cf584, + 0x327883, + 0x22bf83, + 0x234a44, + 0x231b03, + 0x250cc3, + 0x2023c4, + 0x202243, + 0x220ec3, + 0x2cc586, + 0x211004, + 0x24c083, + 0x204703, + 0x21d603, + 0x200882, + 0x327883, + 0x206a82, + 0x22bf83, + 0x234a44, + 0x231b03, + 0x250cc3, + 0x202243, + 0x2cc586, + 0x24c083, + 0x204703, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x20f583, + 0x24c083, + 0x204703, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x211004, + 0x24c083, + 0x204703, + 0x722f48, + 0x201482, + 0x200482, + 0x206a82, + 0x22bf83, + 0x200d02, + 0x202002, + 0x2023c4, + 0x30db04, + 0x21ee82, + 0x211004, + 0x200fc2, + 0x204703, + 0x21d603, + 0x23b346, + 0x212dc2, + 0x203f02, + 0x214a02, + 0x3aa23a03, + 0x3ae095c3, + 0x52886, + 0x52886, + 0x223504, + 0xe3d4c, + 0x19a1cc, + 0x8390d, + 0xda987, + 0x1cc08, + 0x22408, + 0x1a9f8a, + 0x3bb1cb45, + 0x11cb49, + 0x142c08, + 0x16b1ca, + 0x170bce, + 0x144218b, + 0x143f44, + 0x16fcc8, + 0x7f087, + 0x12c47, + 0x172cc9, + 0xb587, + 0x14bcc8, + 0x1a4249, + 0xda4c5, + 0x6098e, + 0xa868d, + 0x142a08, + 0x3be6b1c6, + 0x62c87, + 0x65d07, + 0x6bf07, + 0x72b87, + 0xd502, + 0x14d247, + 0x103a8c, + 0xed107, + 0x90906, + 0xa3849, + 0xa5408, + 0x134c2, + 0x2002, + 0x1808cb, + 0x18549, + 0x44b09, + 0x15bd88, + 0xafcc2, + 0x3c909, + 0x120809, + 0xcd808, + 0xcde07, + 0xcff09, + 0xd32c5, + 0xd36d0, + 0x1a2ac6, + 0x62145, + 0x22f4d, + 0xb146, + 0xdb947, + 0xe1c58, + 0xcfc88, + 0x19110a, + 0x185e4d, + 0x4042, + 0x72606, + 0x8c808, + 0x14ac08, + 0x77909, + 0x496c8, + 0x56f0e, + 0xe9f85, + 0x4ef08, + 0x1bc2, + 0x122706, + 0x6c2, + 0xc01, + 0x3c2e24c4, + 0x3c692f43, + 0x141, + 0x4e86, + 0x141, + 0x1, + 0x4e86, + 0x1570305, + 0x247204, + 0x22bf83, + 0x248fc4, + 0x2023c4, + 0x24c083, + 0x223985, + 0x20b743, + 0x2298c3, + 0x2ebf05, + 0x2232c3, + 0x3d62bf83, + 0x231b03, + 0x250cc3, + 0x200041, + 0x220ec3, + 0x30db04, + 0x211004, + 0x24c083, + 0x204703, + 0x20b803, + 0x77a48, + 0x200882, + 0x327883, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x20f583, + 
0x202002, + 0x2023c4, + 0x202243, + 0x220ec3, + 0x24c083, + 0x2020c3, + 0x204703, + 0x2232c3, + 0x77a48, + 0x38d402, + 0x6a82, + 0xf8f0e, + 0x3e600142, + 0x27a048, + 0x225f46, + 0x2bc3c6, + 0x2258c7, + 0x3ea06b82, + 0x3ef58e08, + 0x20628a, + 0x262708, + 0x200ec2, + 0x206b09, + 0x238887, + 0x21a946, + 0x208d89, + 0x25ba04, + 0x2b7a46, + 0x2e2884, + 0x27b484, + 0x254709, + 0x343886, + 0x247885, + 0x20eec5, + 0x3a7607, + 0x2b9207, + 0x36ee44, + 0x225b06, + 0x2f2945, + 0x2e48c5, + 0x2f6e45, + 0x392607, + 0x3672c5, + 0x3098c9, + 0x2644c5, + 0x2d0cc4, + 0x2e8587, + 0x2cee0e, + 0x31ae09, + 0x327149, + 0x35c146, + 0x31bf08, + 0x2ae58b, + 0x2d1fcc, + 0x26c186, + 0x37b207, + 0x20ae05, + 0x228dca, + 0x31a489, + 0x24f6c9, + 0x388f86, + 0x2f0f05, + 0x282145, + 0x366349, + 0x2f6fcb, + 0x27ed46, + 0x333e06, + 0x20d444, + 0x28ba86, + 0x2aac48, + 0x201186, + 0x203786, + 0x209988, + 0x20a887, + 0x20ab89, + 0x20bdc5, + 0x77a48, + 0x212bc4, + 0x37ee84, + 0x213a85, + 0x395589, + 0x222b07, + 0x222b0b, + 0x2243ca, + 0x228ac5, + 0x3f20ce82, + 0x226d87, + 0x3f629408, + 0x287607, + 0x343bc5, + 0x238f8a, + 0x6a82, + 0x266d4b, + 0x383dca, + 0x223c86, + 0x3a4483, + 0x338f8d, + 0x35b24c, + 0x36654d, + 0x385a45, + 0x23e645, + 0x25ffc7, + 0x200d09, + 0x206186, + 0x32b5c5, + 0x2a9f08, + 0x28b983, + 0x2da308, + 0x28b988, + 0x2bd387, + 0x3b00c8, + 0x200b09, + 0x233a47, + 0x2e2e87, + 0x2f74c8, + 0x24c0c4, + 0x24c0c7, + 0x26df88, + 0x204bc6, + 0x39a48f, + 0x218747, + 0x2d76c6, + 0x240185, + 0x36e8c3, + 0x36e8c7, + 0x36a503, + 0x247b86, + 0x249b86, + 0x24b006, + 0x28fd05, + 0x2687c3, + 0x389f48, + 0x36c709, + 0x3817cb, + 0x24b188, + 0x24d105, + 0x24e185, + 0x3fa3d6c2, + 0x37dc09, + 0x202447, + 0x257f45, + 0x254607, + 0x256dc6, + 0x363645, + 0x36b8cb, + 0x259d04, + 0x2622c5, + 0x262407, + 0x278406, + 0x278845, + 0x286007, + 0x286587, + 0x274344, + 0x28a70a, + 0x28abc8, + 0x328d49, + 0x3a0805, + 0x34c306, + 0x2aae0a, + 0x20edc6, + 0x266f87, + 0x31694d, + 0x227b09, + 0x329805, + 0x345fc7, + 0x344108, + 0x344788, + 0x323487, + 0x366f86, + 0x215b47, + 0x249503, + 0x337a04, + 0x360585, + 0x38dd07, + 0x392009, + 0x227608, + 0x22ccc5, + 0x3afa84, + 0x382f85, + 0x2474cd, + 0x204242, + 0x302bc6, + 0x272546, + 0x2a3cca, + 0x365a86, + 0x377705, + 0x317145, + 0x317147, + 0x37e34c, + 0x27648a, + 0x28b746, + 0x206945, + 0x28b8c6, + 0x28bc07, + 0x28d946, + 0x28fc0c, + 0x208ec9, + 0x3fe05307, + 0x2924c5, + 0x2924c6, + 0x2929c8, + 0x2b1285, + 0x2a2c05, + 0x2a2e48, + 0x2a304a, + 0x4020b882, + 0x4060f802, + 0x3827c5, + 0x2d7643, + 0x267348, + 0x21dcc3, + 0x2a32c4, + 0x21dccb, + 0x2ae948, + 0x2a6088, + 0x40b40cc9, + 0x2a7ec9, + 0x2a8586, + 0x2a97c8, + 0x2a99c9, + 0x2ab6c6, + 0x2ab845, + 0x383646, + 0x2ac389, + 0x341607, + 0x24ad46, + 0x238c87, + 0x206007, + 0x23bc44, + 0x40efa889, + 0x2c3d08, + 0x358d08, + 0x360187, + 0x2bed46, + 0x3007c9, + 0x2f7a07, + 0x32a14a, + 0x2be708, + 0x34c447, + 0x357e06, + 0x21ce8a, + 0x280b88, + 0x271fc5, + 0x22d185, + 0x2be8c7, + 0x2d1789, + 0x2d6d4b, + 0x2ede48, + 0x264549, + 0x24b747, + 0x3adc4c, + 0x2b0e4c, + 0x2b114a, + 0x2b13cc, + 0x2bc348, + 0x2bc548, + 0x2bc744, + 0x2bcb09, + 0x2bcd49, + 0x2bcf8a, + 0x2bd209, + 0x2bd547, + 0x20010c, + 0x242886, + 0x2794c8, + 0x20ee86, + 0x388a46, + 0x329707, + 0x31b008, + 0x261a4b, + 0x2874c7, + 0x2f0bc9, + 0x249149, + 0x253dc7, + 0x2e2ac4, + 0x363b07, + 0x34c986, + 0x2179c6, + 0x237a05, + 0x2ccd48, + 0x20f2c4, + 0x20f2c6, + 0x27634b, + 0x2a6649, + 0x31e946, + 0x35ab09, + 0x392786, + 0x301348, + 0x2025c3, + 0x209185, + 0x2038c9, + 0x20cb05, + 0x2fd284, + 0x277506, + 0x26bdc5, + 
0x2db706, + 0x2fe047, + 0x2af586, + 0x2974cb, + 0x35a7c7, + 0x2d1646, + 0x371506, + 0x3a76c6, + 0x36ee09, + 0x24df0a, + 0x2b5985, + 0x22d94d, + 0x2a3146, + 0x391306, + 0x2e1746, + 0x21d985, + 0x2d39c7, + 0x29bb47, + 0x29fb0e, + 0x220ec3, + 0x2bed09, + 0x316e89, + 0x2291c7, + 0x27e2c7, + 0x2a7505, + 0x323f05, + 0x4126304f, + 0x2c5b47, + 0x2c5d08, + 0x2c6784, + 0x2c6a46, + 0x41629382, + 0x2ca746, + 0x2cc586, + 0x261d8e, + 0x2da14a, + 0x226886, + 0x22ee4a, + 0x205d89, + 0x314e85, + 0x393c08, + 0x3adb06, + 0x31e788, + 0x326ac8, + 0x24090b, + 0x2259c5, + 0x367348, + 0x209acc, + 0x343a87, + 0x24a7c6, + 0x27fd08, + 0x2b7cc8, + 0x41a09142, + 0x3700cb, + 0x376209, + 0x2d2b49, + 0x3a3347, + 0x20af88, + 0x41f5b088, + 0x20bfcb, + 0x35bc09, + 0x221d4d, + 0x378388, + 0x29bf88, + 0x422018c2, + 0x201084, + 0x4260dd02, + 0x2edbc6, + 0x42a02482, + 0x21c48a, + 0x322806, + 0x32c808, + 0x31c208, + 0x2b7946, + 0x388086, + 0x2e8046, + 0x308f85, + 0x23a584, + 0x42f012c4, + 0x338446, + 0x33be87, + 0x432e9287, + 0x35e7cb, + 0x2cf1c9, + 0x23e68a, + 0x261644, + 0x317288, + 0x24ab0d, + 0x2dfd49, + 0x2dff88, + 0x2e06c9, + 0x2e1c44, + 0x208c84, + 0x281645, + 0x36228b, + 0x2ae8c6, + 0x338285, + 0x343d49, + 0x225bc8, + 0x29f3c4, + 0x228f49, + 0x330045, + 0x2b9248, + 0x2e3547, + 0x327548, + 0x281c06, + 0x226c47, + 0x2910c9, + 0x224189, + 0x249905, + 0x339605, + 0x436284c2, + 0x2e8344, + 0x33b285, + 0x291c46, + 0x335e85, + 0x24e247, + 0x26ccc5, + 0x26cd44, + 0x35c206, + 0x32b647, + 0x244f86, + 0x3af445, + 0x37fd48, + 0x226145, + 0x39a807, + 0x3b2509, + 0x2a678a, + 0x236987, + 0x23698c, + 0x247846, + 0x22b289, + 0x340105, + 0x370f08, + 0x212983, + 0x212985, + 0x2e90c5, + 0x255707, + 0x43a17482, + 0x23e287, + 0x2e5046, + 0x2fa7c6, + 0x303146, + 0x2b7c06, + 0x3302c8, + 0x35a005, + 0x2d7787, + 0x2d778d, + 0x24ddc3, + 0x3a4885, + 0x271b47, + 0x387bc8, + 0x271705, + 0x228808, + 0x32c346, + 0x31cd07, + 0x2bdf45, + 0x225a46, + 0x2d3005, + 0x2bb8ca, + 0x2fc546, + 0x233dc7, + 0x2c6905, + 0x2f5207, + 0x2f5904, + 0x2fd206, + 0x331ac5, + 0x34368b, + 0x34c809, + 0x243bca, + 0x249988, + 0x336448, + 0x337c8c, + 0x3556c7, + 0x35dd48, + 0x361308, + 0x36bc05, + 0x3a024a, + 0x2ea909, + 0x43e04582, + 0x204586, + 0x20c984, + 0x2de009, + 0x362d89, + 0x2250c7, + 0x254447, + 0x2b8649, + 0x326508, + 0x32650f, + 0x270f06, + 0x241b0b, + 0x2ebd45, + 0x2ebd47, + 0x2ec189, + 0x21de06, + 0x228ec7, + 0x3b1e45, + 0x2305c4, + 0x26bc86, + 0x200c44, + 0x30ba07, + 0x2ee808, + 0x442f0e08, + 0x2f1305, + 0x2f1447, + 0x256149, + 0x201904, + 0x201908, + 0x4476d588, + 0x28e1c4, + 0x230f08, + 0x31ebc4, + 0x21bf09, + 0x22dc45, + 0x44a012c2, + 0x270f45, + 0x220885, + 0x24cb48, + 0x232a87, + 0x44e05a02, + 0x2c8405, + 0x2518c6, + 0x266246, + 0x2e8308, + 0x2e9c48, + 0x335e46, + 0x2eac86, + 0x21e589, + 0x2fa706, + 0x3081cb, + 0x28edc5, + 0x20fcc6, + 0x3abd88, + 0x3906c6, + 0x35c646, + 0x21d30a, + 0x258dca, + 0x24dac5, + 0x35a0c7, + 0x349746, + 0x45201242, + 0x271c87, + 0x236445, + 0x2aad84, + 0x2aad85, + 0x261546, + 0x276d47, + 0x20c705, + 0x258f44, + 0x26d548, + 0x35c705, + 0x28af07, + 0x293245, + 0x219805, + 0x24a284, + 0x28f709, + 0x2f2788, + 0x2cea06, + 0x212e06, + 0x28dec6, + 0x457a8c08, + 0x2f5087, + 0x2f53cd, + 0x2f5b4c, + 0x2f6149, + 0x2f6389, + 0x45b54382, + 0x3a6903, + 0x20c403, + 0x34ca45, + 0x38de0a, + 0x318e06, + 0x2fab45, + 0x2fe584, + 0x2fe58b, + 0x30e4cc, + 0x30ed0c, + 0x30f015, + 0x30f9cd, + 0x31150f, + 0x3118d2, + 0x311d4f, + 0x312112, + 0x312593, + 0x312a4d, + 0x31300d, + 0x31338e, + 0x31384e, + 0x31448c, + 0x31480c, + 0x314c4b, + 0x314fce, + 0x318092, + 
0x318bcc, + 0x319110, + 0x32d652, + 0x32e60c, + 0x32eccd, + 0x32f00c, + 0x332dd1, + 0x333f8d, + 0x33664d, + 0x336c4a, + 0x336ecc, + 0x3377cc, + 0x337f8c, + 0x33880c, + 0x33c653, + 0x33cc50, + 0x33d050, + 0x33d8cd, + 0x33decc, + 0x33ebc9, + 0x34024d, + 0x340593, + 0x346891, + 0x346cd3, + 0x34738f, + 0x34774c, + 0x347a4f, + 0x347e0d, + 0x34840f, + 0x3487d0, + 0x34924e, + 0x34d5ce, + 0x34dd10, + 0x34e90d, + 0x34f28e, + 0x34f60c, + 0x3505d3, + 0x3521ce, + 0x352910, + 0x352d11, + 0x35314f, + 0x353513, + 0x353f0d, + 0x35424f, + 0x35460e, + 0x354f10, + 0x355309, + 0x356010, + 0x35664f, + 0x356ccf, + 0x357092, + 0x35860e, + 0x3594cd, + 0x35cc0d, + 0x35cf4d, + 0x35f24d, + 0x35f58d, + 0x35f8d0, + 0x35fccb, + 0x36034c, + 0x3606cc, + 0x3609cc, + 0x360cce, + 0x36f050, + 0x371692, + 0x371b0b, + 0x3720ce, + 0x37244e, + 0x3736ce, + 0x37454b, + 0x45f74b16, + 0x37740d, + 0x378594, + 0x37920d, + 0x37ad55, + 0x37be8d, + 0x37c80f, + 0x37d04f, + 0x381a8f, + 0x381e4e, + 0x3830cd, + 0x3855d1, + 0x38820c, + 0x38850c, + 0x38880b, + 0x38938c, + 0x38974f, + 0x389b12, + 0x38a4cd, + 0x38b48c, + 0x38b90c, + 0x38bc0d, + 0x38bf4f, + 0x38c30e, + 0x38dacc, + 0x38e08d, + 0x38e3cb, + 0x38f00c, + 0x38f58d, + 0x38f8ce, + 0x38fd49, + 0x390a93, + 0x39148d, + 0x3917cd, + 0x391dcc, + 0x39224e, + 0x392bcf, + 0x392f8c, + 0x39328d, + 0x3935cf, + 0x39398c, + 0x39408c, + 0x39444c, + 0x39474c, + 0x394e0d, + 0x395152, + 0x3957cc, + 0x395acc, + 0x395dd1, + 0x39620f, + 0x3965cf, + 0x396993, + 0x397a8e, + 0x39800f, + 0x3983cc, + 0x4639870e, + 0x398a8f, + 0x398e56, + 0x39ad12, + 0x39c40c, + 0x39cd0f, + 0x39d38d, + 0x39d6cf, + 0x39da8c, + 0x39dd8d, + 0x39e0cd, + 0x39fd0e, + 0x3a14cc, + 0x3a17cc, + 0x3a1ad0, + 0x3a5c91, + 0x3a60cb, + 0x3a650c, + 0x3a680e, + 0x3a9111, + 0x3a954e, + 0x3a98cd, + 0x3ad8cb, + 0x3ae8cf, + 0x3afb94, + 0x224242, + 0x224242, + 0x204c83, + 0x224242, + 0x204c83, + 0x224242, + 0x20c942, + 0x383685, + 0x3a8e0c, + 0x224242, + 0x224242, + 0x20c942, + 0x224242, + 0x293045, + 0x2a6785, + 0x224242, + 0x224242, + 0x212bc2, + 0x293045, + 0x30ff09, + 0x34658c, + 0x224242, + 0x224242, + 0x224242, + 0x224242, + 0x383685, + 0x224242, + 0x224242, + 0x224242, + 0x224242, + 0x212bc2, + 0x30ff09, + 0x224242, + 0x224242, + 0x224242, + 0x2a6785, + 0x224242, + 0x2a6785, + 0x34658c, + 0x3a8e0c, + 0x327883, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x2023c4, + 0x24c083, + 0x204703, + 0x4fdc8, + 0x57044, + 0x4bf48, + 0x200882, + 0x47206a82, + 0x240bc3, + 0x244c44, + 0x209d03, + 0x2db184, + 0x22fb86, + 0x203b43, + 0x379084, + 0x27bd85, + 0x220ec3, + 0x24c083, + 0x204703, + 0x24e9ca, + 0x23b346, + 0x3727cc, + 0x77a48, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x232003, + 0x2cc586, + 0x24c083, + 0x204703, + 0x21d603, + 0x8ac2, + 0xda987, + 0xb7008, + 0xf60e, + 0x89352, + 0x6e0b, + 0x47f1cb45, + 0x48375d8c, + 0x3c347, + 0x117a4a, + 0x3b4d0, + 0x16fcc8, + 0x7f087, + 0x58a4b, + 0x172cc9, + 0x16fbc7, + 0xb587, + 0x7ef87, + 0x188c6, + 0x14bcc8, + 0x4881c106, + 0xa868d, + 0x117410, + 0x48c08b02, + 0x142a08, + 0x6ca87, + 0x88349, + 0x52946, + 0x92bc8, + 0x10442, + 0x9ec8a, + 0xf2347, + 0xed107, + 0xa3849, + 0xa5408, + 0x159b85, + 0xe0c0e, + 0x1224e, + 0x171cf, + 0x18549, + 0x44b09, + 0x6f38b, + 0x8248f, + 0x8f90c, + 0xac74b, + 0x16be48, + 0x15e6c7, + 0xf02c8, + 0x11bd0b, + 0x13e54c, + 0x149e0c, + 0x151ecc, + 0x154a4d, + 0x15bd88, + 0x3c909, + 0x14e38b, + 0xbef46, + 0xcdfc5, + 0xd36d0, + 0x197586, + 0x62145, + 0xd6908, + 0xdb947, + 0xdcfc7, + 0x142e47, + 0xeda0a, + 0xb6e8a, + 0x72606, + 0x906cd, + 0x14ac08, + 0x496c8, + 0x4a1c9, + 0xed54c, + 0x154c4b, 
+ 0x120304, + 0x16e409, + 0xd7e86, + 0x3f02, + 0x122706, + 0x6c2, + 0xc2ec5, + 0x481, + 0xb7c3, + 0x4878cd86, + 0x92f43, + 0xf582, + 0x3a004, + 0xec2, + 0x23504, + 0x9c2, + 0x8f02, + 0x70c2, + 0x105b42, + 0x1482, + 0x107ac2, + 0x8c2, + 0x1eb82, + 0x35542, + 0x682, + 0x1582, + 0xb042, + 0x31b03, + 0x4142, + 0x202, + 0x9342, + 0xdb02, + 0x10702, + 0x30842, + 0x134c2, + 0x42, + 0x4982, + 0x2002, + 0x2243, + 0x3fc2, + 0x7382, + 0xafcc2, + 0xac82, + 0x17382, + 0x3702, + 0x33a82, + 0x6a42, + 0xf842, + 0x16f542, + 0x8b82, + 0xb602, + 0x4c083, + 0xdc2, + 0x9142, + 0x1042, + 0x12f42, + 0x49885, + 0xa742, + 0x42242, + 0x3e943, + 0x34c2, + 0xed42, + 0x4042, + 0x2742, + 0x3a02, + 0x5a02, + 0x1bc2, + 0x3f02, + 0x74747, + 0x213f03, + 0x200882, + 0x22bf83, + 0x231b03, + 0x20f583, + 0x215ac3, + 0x232003, + 0x24c083, + 0x2020c3, + 0x204703, + 0x292f83, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x20f583, + 0x220ec3, + 0x24c083, + 0x2020c3, + 0x204703, + 0x22bf83, + 0x231b03, + 0x204703, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x200041, + 0x220ec3, + 0x24c083, + 0x20f543, + 0x204703, + 0x327883, + 0x22bf83, + 0x231b03, + 0x2aa943, + 0x20f583, + 0x376b83, + 0x285843, + 0x2a4543, + 0x240383, + 0x250cc3, + 0x2023c4, + 0x24c083, + 0x204703, + 0x2232c3, + 0x341bc4, + 0x22b683, + 0x1e03, + 0x201283, + 0x330d48, + 0x21cec4, + 0x315e4a, + 0x3807c6, + 0xdd704, + 0x3a4107, + 0x22134a, + 0x270dc9, + 0x3b15c7, + 0x20054a, + 0x327883, + 0x38284b, + 0x3292c9, + 0x28dfc5, + 0x2ca587, + 0x6a82, + 0x22bf83, + 0x3583c7, + 0x21e2c5, + 0x2e2989, + 0x231b03, + 0x2257c6, + 0x2b1e83, + 0xe50c3, + 0xfd786, + 0x60346, + 0x3ac7, + 0x214246, + 0x21a385, + 0x20be87, + 0x338647, + 0x4ae50cc3, + 0x32e847, + 0x363a03, + 0x238785, + 0x2023c4, + 0x222788, + 0x2ae00c, + 0x2ad305, + 0x3a30c6, + 0x358287, + 0x20a207, + 0x3b0407, + 0x205688, + 0x27ffcf, + 0x2d2a85, + 0x240cc7, + 0x39f787, + 0x2a340a, + 0x2a9d49, + 0x2d8305, + 0x2dbe4a, + 0x1225c6, + 0x2bb605, + 0x371d44, + 0x2b7886, + 0x300b87, + 0x23b987, + 0x341908, + 0x2025c5, + 0x21e1c6, + 0x203705, + 0x267105, + 0x21e104, + 0x31c107, + 0x33010a, + 0x399d08, + 0x2f0006, + 0x32003, + 0x2cff85, + 0x22b046, + 0x200346, + 0x262046, + 0x220ec3, + 0x38a747, + 0x39f705, + 0x24c083, + 0x3b184d, + 0x2020c3, + 0x341a08, + 0x3abc04, + 0x278705, + 0x2a3306, + 0x2347c6, + 0x20fbc7, + 0x2a4587, + 0x26c785, + 0x204703, + 0x3977c7, + 0x33b749, + 0x258349, + 0x26cd8a, + 0x244002, + 0x238744, + 0x2d7304, + 0x220c47, + 0x23e148, + 0x2dda89, + 0x3a4749, + 0x2ded47, + 0x2d2586, + 0xe0986, + 0x2e1c44, + 0x2e224a, + 0x2e7808, + 0x2e7f09, + 0x29b1c6, + 0x3028c5, + 0x399bc8, + 0x2c070a, + 0x25ad03, + 0x239406, + 0x2dee47, + 0x2115c5, + 0x3abac5, + 0x25b2c3, + 0x2512c4, + 0x22d145, + 0x286687, + 0x2f28c5, + 0x2edd06, + 0x135d45, + 0x226943, + 0x226949, + 0x2784cc, + 0x2ab3cc, + 0x2c7308, + 0x2995c7, + 0x2f1d48, + 0x2f318a, + 0x2f414b, + 0x329408, + 0x3a31c8, + 0x2032c6, + 0x34d3c5, + 0x31f40a, + 0x217cc5, + 0x2012c2, + 0x2bde07, + 0x26ed46, + 0x355b45, + 0x329f89, + 0x237f45, + 0x2be645, + 0x238349, + 0x22aec6, + 0x3650c8, + 0x35b843, + 0x35b8c6, + 0x277446, + 0x300285, + 0x300289, + 0x2b1989, + 0x244347, + 0x100104, + 0x300107, + 0x3a4649, + 0x221545, + 0x3a688, + 0x366d05, + 0x357885, + 0x22a889, + 0x20b0c2, + 0x22b484, + 0x202882, + 0x203fc2, + 0x33f285, + 0x2dd488, + 0x373085, + 0x2bd703, + 0x2bd705, + 0x2ca943, + 0x212602, + 0x269704, + 0x2344c3, + 0x209242, + 0x35a344, + 0x2d8003, + 0x20d102, + 0x2bd783, + 0x28c784, + 0x2b5c43, + 0x23d444, + 0x202942, + 0x275183, + 0x203a03, + 0x209642, + 0x2eab42, + 0x2b17c9, + 
0x207e02, + 0x289ec4, + 0x208b42, + 0x399a44, + 0x2d2544, + 0x2e69c4, + 0x203f02, + 0x202f02, + 0x216483, + 0x2e3c43, + 0x310c04, + 0x262e44, + 0x2b1b84, + 0x2c5584, + 0x2ff383, + 0x2af143, + 0x322544, + 0x301544, + 0x301846, + 0x25b3c2, + 0x206a82, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x200882, + 0x327883, + 0x22bf83, + 0x231b03, + 0x204543, + 0x250cc3, + 0x2023c4, + 0x2b1a84, + 0x211004, + 0x24c083, + 0x204703, + 0x21d603, + 0x2e4144, + 0x27a003, + 0x2b3003, + 0x347104, + 0x366b06, + 0x208143, + 0x21afc3, + 0x211a83, + 0x2b0d83, + 0x2387c3, + 0x232003, + 0x2298c5, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x2da603, + 0x22e9c3, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x202243, + 0x24c083, + 0x232704, + 0x204703, + 0x29e044, + 0x2b7685, + 0x206a82, + 0x200e42, + 0x20f582, + 0x202b42, + 0x200fc2, + 0x22bf83, + 0x234a44, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x211004, + 0x24c083, + 0x204703, + 0x20b803, + 0x223504, + 0x77a48, + 0x22bf83, + 0x2020c3, + 0x247204, + 0x77a48, + 0x22bf83, + 0x248fc4, + 0x2023c4, + 0x2020c3, + 0x2018c2, + 0x204703, + 0x2298c3, + 0x2ebf05, + 0x2012c2, + 0x301683, + 0x200882, + 0x77a48, + 0x206a82, + 0x231b03, + 0x250cc3, + 0x202002, + 0x204703, + 0x200882, + 0x200707, + 0x25ba05, + 0x2ba684, + 0x387ac6, + 0x20540b, + 0x267b89, + 0x3a3006, + 0x340a09, + 0x2b2508, + 0x208243, + 0x77a48, + 0x2286c7, + 0x328148, + 0x345883, + 0x3038c4, + 0x32c10b, + 0x266545, + 0x2f7888, + 0x2ea389, + 0x25a0c3, + 0x22bf83, + 0x204488, + 0x2f0fc7, + 0x345e86, + 0x231b03, + 0x345987, + 0x250cc3, + 0x2598c6, + 0x202243, + 0x22d007, + 0x235947, + 0x390f47, + 0x31c085, + 0x20a8c3, + 0x21038b, + 0x265448, + 0x227c88, + 0x33b906, + 0x344b49, + 0x323887, + 0x2fae85, + 0x3af704, + 0x271008, + 0x234e4a, + 0x235089, + 0x26d4c3, + 0x281485, + 0x28da83, + 0x22c806, + 0x2ba4c4, + 0x300488, + 0x388b8b, + 0x33f145, + 0x2b7406, + 0x2ba3c5, + 0x2baa88, + 0x2bb747, + 0x3b0287, + 0x315a47, + 0x2151c4, + 0x30b3c7, + 0x296746, + 0x220ec3, + 0x2c3b08, + 0x24e2c3, + 0x2cac08, + 0x2d42c5, + 0x3ac0c8, + 0x231d07, + 0x24c083, + 0x245c83, + 0x28ae44, + 0x323b87, + 0x209d83, + 0x235a0b, + 0x2039c3, + 0x24e284, + 0x2ebf88, + 0x204703, + 0x2f2f05, + 0x376a05, + 0x3abfc6, + 0x215c45, + 0x2d4684, + 0x209202, + 0x2e81c3, + 0x371dca, + 0x3a24c3, + 0x271549, + 0x30b0c6, + 0x217f88, + 0x28c2c6, + 0x224d87, + 0x2e8788, + 0x2f2d08, + 0x319043, + 0x373143, + 0x22a3c9, + 0x2f61c3, + 0x349646, + 0x25b686, + 0x2445c6, + 0x397389, + 0x2ffd44, + 0x216c43, + 0x2dbd45, + 0x30c709, + 0x22d103, + 0x2fc404, + 0x362ac4, + 0x212dc4, + 0x294846, + 0x20a403, + 0x20a408, + 0x255448, + 0x2eb306, + 0x2fac8b, + 0x2fafc8, + 0x2fb1cb, + 0x2fdd89, + 0x2fd687, + 0x2fe208, + 0x2fedc3, + 0x2e94c6, + 0x39ab47, + 0x297445, + 0x34d8c9, + 0x34448d, + 0x217dd1, + 0x234905, + 0x200882, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x2c8144, + 0x250cc3, + 0x202243, + 0x220ec3, + 0x24c083, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x232003, + 0x24c083, + 0x204703, + 0x263c83, + 0x20b803, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x2023c4, + 0x232003, + 0x24c083, + 0x204703, + 0x212dc2, + 0x200141, + 0x200882, + 0x200001, + 0x311602, + 0x77a48, + 0x21fc45, + 0x200481, + 0x2bf83, + 0x200741, + 0x200081, + 0x201501, + 0x234382, + 0x36a504, + 0x383603, + 0x2007c1, + 0x200901, + 0x200041, + 0x2001c1, + 0x388e07, + 0x2d49cf, + 0x2cadc6, 
+ 0x2000c1, + 0x26c046, + 0x200341, + 0x200cc1, + 0x25040e, + 0x200fc1, + 0x204703, + 0x200ac1, + 0x26f285, + 0x209202, + 0x25b1c5, + 0x200c01, + 0x200241, + 0x200a01, + 0x2012c2, + 0x2002c1, + 0x201d01, + 0x2041c1, + 0x200781, + 0x200641, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x20b743, + 0x22bf83, + 0x250cc3, + 0x8ce48, + 0x220ec3, + 0x24c083, + 0x204703, + 0x14d9688, + 0x77a48, + 0x45684, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x24c083, + 0x204703, + 0x201e03, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x2c8144, + 0x204703, + 0x297985, + 0x325c04, + 0x22bf83, + 0x24c083, + 0x204703, + 0x206a82, + 0x22bf83, + 0x230509, + 0x231b03, + 0x23db49, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x2e1a48, + 0x21d847, + 0x2ebf05, + 0x200707, + 0x20540b, + 0x204f48, + 0x340a09, + 0x2286c7, + 0x204488, + 0x2598c6, + 0x235947, + 0x227c88, + 0x33b906, + 0x323887, + 0x235089, + 0x37ab09, + 0x2b7406, + 0x2b9385, + 0x2c3b08, + 0x24e2c3, + 0x2cac08, + 0x231d07, + 0x209d83, + 0x358107, + 0x215c45, + 0x2dd2c8, + 0x264905, + 0x373143, + 0x2c7b49, + 0x2a9bc7, + 0x2fc404, + 0x362ac4, + 0x2fac8b, + 0x2fafc8, + 0x2fd687, + 0x22bf83, + 0x231b03, + 0x20f583, + 0x204703, + 0x22d3c3, + 0x250cc3, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x200882, + 0x206a82, + 0x204703, + 0x77a48, + 0x200882, + 0x206a82, + 0x20f582, + 0x202002, + 0x200342, + 0x24c083, + 0x200fc2, + 0x200882, + 0x327883, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x20f582, + 0x250cc3, + 0x202243, + 0x220ec3, + 0x211004, + 0x24c083, + 0x21a883, + 0x204703, + 0x2ffd44, + 0x2232c3, + 0x250cc3, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x2020c3, + 0x204703, + 0x39c8c7, + 0x22bf83, + 0x2555c7, + 0x263506, + 0x203983, + 0x210c43, + 0x250cc3, + 0x2037c3, + 0x2023c4, + 0x377844, + 0x2d4746, + 0x250403, + 0x24c083, + 0x204703, + 0x297985, + 0x20d644, + 0x317f43, + 0x2273c3, + 0x2bde07, + 0x2e34c5, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x209ec2, + 0x34ad43, + 0x237643, + 0x327883, + 0x5522bf83, + 0x200d02, + 0x231b03, + 0x209d03, + 0x250cc3, + 0x2023c4, + 0x265603, + 0x2d2a83, + 0x220ec3, + 0x211004, + 0x5560dbc2, + 0x24c083, + 0x204703, + 0x22a543, + 0x2468c3, + 0x212dc2, + 0x2232c3, + 0x77a48, + 0x250cc3, + 0x2cf584, + 0x327883, + 0x206a82, + 0x22bf83, + 0x234a44, + 0x231b03, + 0x250cc3, + 0x2023c4, + 0x202243, + 0x2ce884, + 0x30db04, + 0x2cc586, + 0x211004, + 0x24c083, + 0x204703, + 0x21d603, + 0x26ed46, + 0x18b4b, + 0x1c106, + 0x2310a, + 0xfee8a, + 0x77a48, + 0x2036c4, + 0x22bf83, + 0x327844, + 0x231b03, + 0x24a304, + 0x250cc3, + 0x2614c3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x32104b, + 0x39e40a, + 0x3b0a4c, + 0x200882, + 0x206a82, + 0x20f582, + 0x2a8e05, + 0x2023c4, + 0x20f842, + 0x220ec3, + 0x30db04, + 0x202b42, + 0x200fc2, + 0x203682, + 0x212dc2, + 0x127883, + 0x35ecc9, + 0x25b508, + 0x33a1c9, + 0x235789, + 0x23cc0a, + 0x255f4a, + 0x20a1c2, + 0x21eb82, + 0x6a82, + 0x22bf83, + 0x208942, + 0x240e86, + 0x356b42, + 0x218982, + 0x27184e, + 0x2751ce, + 0x27f787, + 0x3804c7, + 0x274802, + 0x231b03, + 0x250cc3, + 0x201b42, + 0x202002, + 0x233fcf, + 0x205482, + 0x23bb07, + 0x33bb07, + 0x365587, + 0x24dbcc, + 0x253f4c, + 0x204b44, + 0x28148a, + 0x28da82, + 0x20ac82, + 0x2b1f04, + 0x2266c2, + 0x2bc342, + 0x254184, + 0x21a982, + 0x217382, + 0x33b987, + 0x27fec5, + 0x233a82, + 0x233f44, + 0x36f542, + 0x2ce2c8, + 0x24c083, + 0x3b2248, + 0x201082, + 0x232fc5, + 0x324146, + 0x204703, + 0x20a742, + 0x2ddcc7, + 0x9202, + 0x274b05, + 
0x393f45, + 0x2040c2, + 0x22b382, + 0x31650a, + 0x26c60a, + 0x20b5c2, + 0x320784, + 0x201f02, + 0x238608, + 0x20a302, + 0x33b148, + 0x2f7bc7, + 0x2f7ec9, + 0x274b82, + 0x2fdfc5, + 0x25b9c5, + 0x2c13cb, + 0x2c210c, + 0x22ce88, + 0x2fe388, + 0x25b3c2, + 0x20fc82, + 0x200882, + 0x77a48, + 0x206a82, + 0x22bf83, + 0x20f582, + 0x202b42, + 0x200fc2, + 0x204703, + 0x203682, + 0x200882, + 0x57606a82, + 0x57a50cc3, + 0x39a883, + 0x20f842, + 0x24c083, + 0x3a2c03, + 0x204703, + 0x2d9f83, + 0x274846, + 0x160b803, + 0x77a48, + 0x62145, + 0x6ae47, + 0x58200182, + 0x58600ec2, + 0x58a01e02, + 0x58e02902, + 0x592136c2, + 0x59601482, + 0x59a06a82, + 0x59e0e542, + 0x5a221982, + 0x5a601582, + 0x2751c3, + 0x201503, + 0x5aa17982, + 0x5ae01c82, + 0x49507, + 0x5b233602, + 0x5b600902, + 0x5ba048c2, + 0x5be0b542, + 0x5c204982, + 0x5c602002, + 0xbac45, + 0x226103, + 0x20b504, + 0x5ca266c2, + 0x5ce35c82, + 0x5d200102, + 0x7c80b, + 0x5d600982, + 0x5de0ad82, + 0x5e20f842, + 0x5e600342, + 0x5ea50702, + 0x5ee02ac2, + 0x5f203642, + 0x5f608b82, + 0x5fa0dbc2, + 0x5fe00cc2, + 0x60202b42, + 0x606353c2, + 0x60a02d82, + 0x60e43482, + 0x14c104, + 0x2d2fc3, + 0x61211382, + 0x61619d02, + 0x61a052c2, + 0x61e03502, + 0x62200fc2, + 0x62609242, + 0xdbc07, + 0x62a08582, + 0x62e05fc2, + 0x63203682, + 0x6364ecc2, + 0xed54c, + 0x63a1c642, + 0x63e75bc2, + 0x642076c2, + 0x64601242, + 0x64a0c402, + 0x64e3cd82, + 0x65201d02, + 0x65603b82, + 0x65a777c2, + 0x65e4ffc2, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x5da65603, + 0x2805c3, + 0x229944, + 0x25b406, + 0x2e8283, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x265603, + 0x2805c3, + 0x200482, + 0x200482, + 0x265603, + 0x2805c3, + 0x6662bf83, + 0x231b03, + 0x331043, + 0x220ec3, + 0x24c083, + 0x204703, + 0x77a48, + 0x206a82, + 0x22bf83, + 0x24c083, + 0x204703, + 0x22bf83, + 0x231b03, + 
0x250cc3, + 0x220ec3, + 0x24c083, + 0x204703, + 0x247204, + 0x206a82, + 0x22bf83, + 0x2cfe43, + 0x231b03, + 0x248fc4, + 0x20f583, + 0x250cc3, + 0x2023c4, + 0x202243, + 0x220ec3, + 0x24c083, + 0x204703, + 0x2298c3, + 0x2ebf05, + 0x237d03, + 0x2232c3, + 0x206a82, + 0x22bf83, + 0x265603, + 0x24c083, + 0x204703, + 0x200882, + 0x327883, + 0x77a48, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x22fb86, + 0x2023c4, + 0x202243, + 0x211004, + 0x24c083, + 0x204703, + 0x21d603, + 0x22bf83, + 0x231b03, + 0x24c083, + 0x204703, + 0x22bf83, + 0x1c106, + 0x231b03, + 0x250cc3, + 0xd19c6, + 0x24c083, + 0x204703, + 0x309748, + 0x30d009, + 0x319509, + 0x329dc8, + 0x37d6c8, + 0x37d6c9, + 0x34385, + 0x200882, + 0x2e3305, + 0x22fc03, + 0x69206a82, + 0x231b03, + 0x250cc3, + 0x22c947, + 0x2387c3, + 0x220ec3, + 0x24c083, + 0x20f543, + 0x213583, + 0x2020c3, + 0x204703, + 0x23b346, + 0x2012c2, + 0x2232c3, + 0x77a48, + 0x200882, + 0x327883, + 0x206a82, + 0x22bf83, + 0x231b03, + 0x250cc3, + 0x2023c4, + 0x220ec3, + 0x24c083, + 0x204703, + 0x20b803, +} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [ 1 bits] unused +// [ 1 bits] wildcard bit +// [ 2 bits] node type +// [14 bits] high nodes index (exclusive) of children +// [14 bits] low nodes index (inclusive) of children +var children = [...]uint32{ + 0x0, + 0x10000000, + 0x20000000, + 0x40000000, + 0x50000000, + 0x60000000, + 0x1858610, + 0x185c616, + 0x187c617, + 0x19d861f, + 0x19ec676, + 0x1a0067b, + 0x1a10680, + 0x1a2c684, + 0x1a3068b, + 0x1a4868c, + 0x1a6c692, + 0x1a7069b, + 0x1a8869c, + 0x1a8c6a2, + 0x1aa86a3, + 0x1aac6aa, + 0x1af46ab, + 0x1af86bd, + 0x1b186be, + 0x1b2c6c6, + 0x1b306cb, + 0x1b606cc, + 0x1b7c6d8, + 0x1ba46df, + 0x1bac6e9, + 0x1bb06eb, + 0x1c446ec, + 0x1c58711, + 0x1c6c716, + 0x1c9871b, + 0x1ca8726, + 0x1cbc72a, + 0x1ce072f, + 0x1df8738, + 0x1dfc77e, + 0x1e1077f, + 0x1e24784, + 0x1e2c789, + 0x1e3c78b, + 0x1e4078f, + 0x1e58790, + 0x1ea0796, + 0x1eb47a8, + 0x1eb87ad, + 0x1ebc7ae, + 0x1ec47af, + 0x1f007b1, + 0x61f047c0, + 0x1f187c1, + 0x1f1c7c6, + 0x1f2c7c7, + 0x1fdc7cb, + 0x1fe07f7, + 0x21fe87f8, + 0x21fec7fa, + 0x1ff07fb, + 0x20247fc, + 0x2028809, + 0x244880a, + 0x22498912, + 0x2249c926, + 0x24c4927, + 0x24cc931, + 0x224d0933, + 0x24d8934, + 0x224e8936, + 0x224ec93a, + 0x24f893b, + 0x24fc93e, + 0x2250093f, + 0x251c940, + 0x2534947, + 0x253894d, + 0x254894e, + 0x2550952, + 0x22584954, + 0x2588961, + 0x2598962, + 0x25c4966, + 0x25dc971, + 0x25f0977, + 0x261897c, + 0x2638986, + 0x266898e, + 0x269099a, + 0x26949a4, + 0x26b89a5, + 0x26bc9ae, + 0x26d09af, + 0x26d49b4, + 0x26d89b5, + 0x26f89b6, + 0x26fc9be, + 0x270c9bf, + 0x27809c3, + 0x279c9e0, + 0x27a89e7, + 0x27bc9ea, + 0x27d49ef, + 0x27e89f5, + 0x28009fa, + 0x2818a00, + 0x2830a06, + 0x284ca0c, + 0x2864a13, + 0x28c4a19, + 0x28dca31, + 0x28f0a37, + 0x2934a3c, + 0x29b4a4d, + 0x29e0a6d, + 0x29e4a78, + 0x29eca79, + 0x2a0ca7b, + 0x2a10a83, + 0x2a2ca84, + 0x2a34a8b, + 0x2a68a8d, + 0x2aa0a9a, + 0x2aa4aa8, + 0x2ad0aa9, + 0x2ae8ab4, + 0x2b0caba, + 0x2b2cac3, + 0x30f0acb, + 0x30fcc3c, + 0x311cc3f, + 0x32d8c47, + 0x33a8cb6, + 0x3418cea, + 0x3470d06, + 0x3558d1c, + 0x35b0d56, + 0x35ecd6c, + 0x36e8d7b, + 0x37b4dba, + 0x384cded, + 0x38dce13, + 0x3940e37, + 0x3b78e50, + 0x3c30ede, + 0x3cfcf0c, + 0x3d48f3f, + 0x3dd0f52, + 0x3e0cf74, + 0x3e5cf83, + 0x3ed4f97, + 0x63ed8fb5, + 
0x63edcfb6, + 0x63ee0fb7, + 0x3f5cfb8, + 0x3fc0fd7, + 0x403cff0, + 0x40b500f, + 0x413502d, + 0x41a104d, + 0x42cd068, + 0x43250b3, + 0x643290c9, + 0x43c10ca, + 0x44490f0, + 0x4495112, + 0x44fd125, + 0x45a513f, + 0x466d169, + 0x46d519b, + 0x47e91b5, + 0x647ed1fa, + 0x647f11fb, + 0x484d1fc, + 0x48a9213, + 0x493922a, + 0x49b524e, + 0x49f926d, + 0x4add27e, + 0x4b112b7, + 0x4b712c4, + 0x4be52dc, + 0x4c6d2f9, + 0x4cad31b, + 0x4d1d32b, + 0x64d21347, + 0x64d25348, + 0x24d29349, + 0x4d4134a, + 0x4d5d350, + 0x4da1357, + 0x4db1368, + 0x4dc936c, + 0x4e41372, + 0x4e55390, + 0x4e6d395, + 0x4e9139b, + 0x4ea53a4, + 0x4ec13a9, + 0x4ec53b0, + 0x4ecd3b1, + 0x4f093b3, + 0x4f1d3c2, + 0x4f253c7, + 0x4f2d3c9, + 0x4f313cb, + 0x4f553cc, + 0x4f793d5, + 0x4f913de, + 0x4f953e4, + 0x4f9d3e5, + 0x4fa13e7, + 0x4ff53e8, + 0x50193fd, + 0x5039406, + 0x505540e, + 0x5065415, + 0x5079419, + 0x507d41e, + 0x508541f, + 0x5099421, + 0x50a9426, + 0x50ad42a, + 0x50c942b, + 0x5959432, + 0x5991656, + 0x59bd664, + 0x59d566f, + 0x59f5675, + 0x659f967d, + 0x5a3d67e, + 0x5a4568f, + 0x25a49691, + 0x25a4d692, + 0x5a51693, + 0x5b71694, + 0x25b756dc, + 0x25b7d6dd, + 0x25b856df, + 0x25b916e1, + 0x5b956e4, + 0x5bbd6e5, + 0x5be56ef, + 0x5be96f9, + 0x25c216fa, + 0x5c31708, + 0x678970c, + 0x678d9e2, + 0x67919e3, + 0x267959e4, + 0x67999e5, + 0x2679d9e6, + 0x67a19e7, + 0x267ad9e8, + 0x67b19eb, + 0x67b59ec, + 0x267b99ed, + 0x67bd9ee, + 0x267c59ef, + 0x67c99f1, + 0x67cd9f2, + 0x267dd9f3, + 0x67e19f7, + 0x67e59f8, + 0x67e99f9, + 0x67ed9fa, + 0x267f19fb, + 0x67f59fc, + 0x67f99fd, + 0x67fd9fe, + 0x68019ff, + 0x26809a00, + 0x680da02, + 0x6811a03, + 0x6815a04, + 0x26819a05, + 0x681da06, + 0x26825a07, + 0x26829a09, + 0x6845a0a, + 0x6851a11, + 0x6891a14, + 0x6895a24, + 0x68b9a25, + 0x69fda2e, + 0x26a05a7f, + 0x26a09a81, + 0x26a0da82, + 0x6a15a83, + 0x6af1a85, + 0x6af5abc, + 0x6b21abd, + 0x6b41ac8, + 0x6b4dad0, + 0x6b6dad3, + 0x6ba5adb, + 0x6e3dae9, + 0x6ef9b8f, + 0x6f0dbbe, + 0x6f41bc3, + 0x6f6dbd0, + 0x6f89bdb, + 0x6fadbe2, + 0x6fc5beb, + 0x6fe1bf1, + 0x7005bf8, + 0x7015c01, + 0x7045c05, + 0x7061c11, + 0x726dc18, + 0x7291c9b, + 0x72b1ca4, + 0x72c5cac, + 0x72d9cb1, + 0x72f9cb6, + 0x739dcbe, + 0x73b9ce7, + 0x73d5cee, + 0x73d9cf5, + 0x73ddcf6, + 0x73e1cf7, + 0x73f5cf8, + 0x7415cfd, + 0x7421d05, + 0x7451d08, + 0x74d1d14, + 0x74e5d34, + 0x74e9d39, + 0x7501d3a, + 0x750dd40, + 0x7511d43, + 0x752dd44, + 0x7569d4b, + 0x756dd5a, + 0x758dd5b, + 0x75ddd63, + 0x75f5d77, + 0x7649d7d, + 0x764dd92, + 0x7651d93, + 0x7695d94, + 0x76a5da5, + 0x76ddda9, + 0x770ddb7, + 0x7849dc3, + 0x786de12, + 0x7899e1b, + 0x78a1e26, + 0x78a5e28, + 0x79ade29, + 0x79b9e6b, + 0x79c5e6e, + 0x79d1e71, + 0x79dde74, + 0x79e9e77, + 0x79f5e7a, + 0x7a01e7d, + 0x7a0de80, + 0x7a19e83, + 0x7a25e86, + 0x7a31e89, + 0x7a3de8c, + 0x7a49e8f, + 0x7a51e92, + 0x7a5de94, + 0x7a69e97, + 0x7a75e9a, + 0x7a81e9d, + 0x7a8dea0, + 0x7a99ea3, + 0x7aa5ea6, + 0x7ab1ea9, + 0x7abdeac, + 0x7ac9eaf, + 0x7ad5eb2, + 0x7ae1eb5, + 0x7aedeb8, + 0x7af9ebb, + 0x7b05ebe, + 0x7b11ec1, + 0x7b1dec4, + 0x7b25ec7, + 0x7b31ec9, + 0x7b3decc, + 0x7b49ecf, + 0x7b55ed2, + 0x7b61ed5, + 0x7b6ded8, + 0x7b79edb, + 0x7b85ede, + 0x7b91ee1, + 0x7b9dee4, + 0x7ba9ee7, + 0x7bb5eea, + 0x7bc1eed, + 0x7bc9ef0, + 0x7bd5ef2, + 0x7be1ef5, + 0x7bedef8, + 0x7bf9efb, + 0x7c05efe, + 0x7c11f01, + 0x7c1df04, + 0x7c29f07, + 0x7c2df0a, + 0x7c39f0b, + 0x7c51f0e, + 0x7c55f14, + 0x7c65f15, + 0x7c7df19, + 0x7cc1f1f, + 0x7cd5f30, + 0x7d09f35, + 0x7d19f42, + 0x7d35f46, + 0x7d4df4d, + 0x7d51f53, + 0x27d95f54, + 0x7d99f65, + 0x7dc5f66, +} + +// max children 421 (capacity 511) 
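The generated table.go comment above spells out how each `children` entry packs its fields into a single uint32 (from the most significant bit: one unused bit, one wildcard bit, two node-type bits, then a 14-bit exclusive high node index and a 14-bit inclusive low node index). As a rough illustration of that layout only, here is a minimal decoding sketch; `decodeChildren` and the sample entry are made up for this note and are not the accessors the publicsuffix package itself uses:

```go
package main

import "fmt"

// decodeChildren unpacks one entry of the generated children table using the
// bit layout documented in table.go: bit 31 unused, bit 30 wildcard, bits
// 28-29 node type, bits 14-27 high node index (exclusive), bits 0-13 low
// node index (inclusive). Names here are illustrative only.
func decodeChildren(x uint32) (lo, hi, nodeType uint32, wildcard bool) {
	lo = x & 0x3fff           // bits 0-13
	hi = (x >> 14) & 0x3fff   // bits 14-27
	nodeType = (x >> 28) & 3  // bits 28-29
	wildcard = (x>>30)&1 == 1 // bit 30
	return
}

func main() {
	// 0x1858610 is one of the entries in the children table above.
	lo, hi, typ, wc := decodeChildren(0x1858610)
	fmt.Printf("lo=%d hi=%d type=%d wildcard=%v\n", lo, hi, typ, wc)
}
```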
+// max text offset 27811 (capacity 32767) +// max text length 36 (capacity 63) +// max hi 8049 (capacity 16383) +// max lo 8038 (capacity 16383) diff --git a/vendor/golang.org/x/net/publicsuffix/table_test.go b/vendor/golang.org/x/net/publicsuffix/table_test.go new file mode 100644 index 00000000000..2c974762e2c --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table_test.go @@ -0,0 +1,16075 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +var rules = [...]string{ + "ac", + "com.ac", + "edu.ac", + "gov.ac", + "net.ac", + "mil.ac", + "org.ac", + "ad", + "nom.ad", + "ae", + "co.ae", + "net.ae", + "org.ae", + "sch.ae", + "ac.ae", + "gov.ae", + "mil.ae", + "aero", + "accident-investigation.aero", + "accident-prevention.aero", + "aerobatic.aero", + "aeroclub.aero", + "aerodrome.aero", + "agents.aero", + "aircraft.aero", + "airline.aero", + "airport.aero", + "air-surveillance.aero", + "airtraffic.aero", + "air-traffic-control.aero", + "ambulance.aero", + "amusement.aero", + "association.aero", + "author.aero", + "ballooning.aero", + "broker.aero", + "caa.aero", + "cargo.aero", + "catering.aero", + "certification.aero", + "championship.aero", + "charter.aero", + "civilaviation.aero", + "club.aero", + "conference.aero", + "consultant.aero", + "consulting.aero", + "control.aero", + "council.aero", + "crew.aero", + "design.aero", + "dgca.aero", + "educator.aero", + "emergency.aero", + "engine.aero", + "engineer.aero", + "entertainment.aero", + "equipment.aero", + "exchange.aero", + "express.aero", + "federation.aero", + "flight.aero", + "freight.aero", + "fuel.aero", + "gliding.aero", + "government.aero", + "groundhandling.aero", + "group.aero", + "hanggliding.aero", + "homebuilt.aero", + "insurance.aero", + "journal.aero", + "journalist.aero", + "leasing.aero", + "logistics.aero", + "magazine.aero", + "maintenance.aero", + "media.aero", + "microlight.aero", + "modelling.aero", + "navigation.aero", + "parachuting.aero", + "paragliding.aero", + "passenger-association.aero", + "pilot.aero", + "press.aero", + "production.aero", + "recreation.aero", + "repbody.aero", + "res.aero", + "research.aero", + "rotorcraft.aero", + "safety.aero", + "scientist.aero", + "services.aero", + "show.aero", + "skydiving.aero", + "software.aero", + "student.aero", + "trader.aero", + "trading.aero", + "trainer.aero", + "union.aero", + "workinggroup.aero", + "works.aero", + "af", + "gov.af", + "com.af", + "org.af", + "net.af", + "edu.af", + "ag", + "com.ag", + "org.ag", + "net.ag", + "co.ag", + "nom.ag", + "ai", + "off.ai", + "com.ai", + "net.ai", + "org.ai", + "al", + "com.al", + "edu.al", + "gov.al", + "mil.al", + "net.al", + "org.al", + "am", + "ao", + "ed.ao", + "gv.ao", + "og.ao", + "co.ao", + "pb.ao", + "it.ao", + "aq", + "ar", + "com.ar", + "edu.ar", + "gob.ar", + "gov.ar", + "int.ar", + "mil.ar", + "net.ar", + "org.ar", + "tur.ar", + "arpa", + "e164.arpa", + "in-addr.arpa", + "ip6.arpa", + "iris.arpa", + "uri.arpa", + "urn.arpa", + "as", + "gov.as", + "asia", + "at", + "ac.at", + "co.at", + "gv.at", + "or.at", + "au", + "com.au", + "net.au", + "org.au", + "edu.au", + "gov.au", + "asn.au", + "id.au", + "info.au", + "conf.au", + "oz.au", + "act.au", + "nsw.au", + "nt.au", + "qld.au", + "sa.au", + "tas.au", + "vic.au", + "wa.au", + "act.edu.au", + "nsw.edu.au", + "nt.edu.au", + "qld.edu.au", + "sa.edu.au", + "tas.edu.au", + "vic.edu.au", + "wa.edu.au", + "qld.gov.au", + "sa.gov.au", + "tas.gov.au", + "vic.gov.au", + "wa.gov.au", + "aw", + "com.aw", + "ax", + "az", + "com.az", + 
"net.az", + "int.az", + "gov.az", + "org.az", + "edu.az", + "info.az", + "pp.az", + "mil.az", + "name.az", + "pro.az", + "biz.az", + "ba", + "com.ba", + "edu.ba", + "gov.ba", + "mil.ba", + "net.ba", + "org.ba", + "bb", + "biz.bb", + "co.bb", + "com.bb", + "edu.bb", + "gov.bb", + "info.bb", + "net.bb", + "org.bb", + "store.bb", + "tv.bb", + "*.bd", + "be", + "ac.be", + "bf", + "gov.bf", + "bg", + "a.bg", + "b.bg", + "c.bg", + "d.bg", + "e.bg", + "f.bg", + "g.bg", + "h.bg", + "i.bg", + "j.bg", + "k.bg", + "l.bg", + "m.bg", + "n.bg", + "o.bg", + "p.bg", + "q.bg", + "r.bg", + "s.bg", + "t.bg", + "u.bg", + "v.bg", + "w.bg", + "x.bg", + "y.bg", + "z.bg", + "0.bg", + "1.bg", + "2.bg", + "3.bg", + "4.bg", + "5.bg", + "6.bg", + "7.bg", + "8.bg", + "9.bg", + "bh", + "com.bh", + "edu.bh", + "net.bh", + "org.bh", + "gov.bh", + "bi", + "co.bi", + "com.bi", + "edu.bi", + "or.bi", + "org.bi", + "biz", + "bj", + "asso.bj", + "barreau.bj", + "gouv.bj", + "bm", + "com.bm", + "edu.bm", + "gov.bm", + "net.bm", + "org.bm", + "*.bn", + "bo", + "com.bo", + "edu.bo", + "gov.bo", + "gob.bo", + "int.bo", + "org.bo", + "net.bo", + "mil.bo", + "tv.bo", + "br", + "adm.br", + "adv.br", + "agr.br", + "am.br", + "arq.br", + "art.br", + "ato.br", + "b.br", + "bio.br", + "blog.br", + "bmd.br", + "cim.br", + "cng.br", + "cnt.br", + "com.br", + "coop.br", + "ecn.br", + "eco.br", + "edu.br", + "emp.br", + "eng.br", + "esp.br", + "etc.br", + "eti.br", + "far.br", + "flog.br", + "fm.br", + "fnd.br", + "fot.br", + "fst.br", + "g12.br", + "ggf.br", + "gov.br", + "imb.br", + "ind.br", + "inf.br", + "jor.br", + "jus.br", + "leg.br", + "lel.br", + "mat.br", + "med.br", + "mil.br", + "mp.br", + "mus.br", + "net.br", + "*.nom.br", + "not.br", + "ntr.br", + "odo.br", + "org.br", + "ppg.br", + "pro.br", + "psc.br", + "psi.br", + "qsl.br", + "radio.br", + "rec.br", + "slg.br", + "srv.br", + "taxi.br", + "teo.br", + "tmp.br", + "trd.br", + "tur.br", + "tv.br", + "vet.br", + "vlog.br", + "wiki.br", + "zlg.br", + "bs", + "com.bs", + "net.bs", + "org.bs", + "edu.bs", + "gov.bs", + "bt", + "com.bt", + "edu.bt", + "gov.bt", + "net.bt", + "org.bt", + "bv", + "bw", + "co.bw", + "org.bw", + "by", + "gov.by", + "mil.by", + "com.by", + "of.by", + "bz", + "com.bz", + "net.bz", + "org.bz", + "edu.bz", + "gov.bz", + "ca", + "ab.ca", + "bc.ca", + "mb.ca", + "nb.ca", + "nf.ca", + "nl.ca", + "ns.ca", + "nt.ca", + "nu.ca", + "on.ca", + "pe.ca", + "qc.ca", + "sk.ca", + "yk.ca", + "gc.ca", + "cat", + "cc", + "cd", + "gov.cd", + "cf", + "cg", + "ch", + "ci", + "org.ci", + "or.ci", + "com.ci", + "co.ci", + "edu.ci", + "ed.ci", + "ac.ci", + "net.ci", + "go.ci", + "asso.ci", + "xn--aroport-bya.ci", + "int.ci", + "presse.ci", + "md.ci", + "gouv.ci", + "*.ck", + "!www.ck", + "cl", + "gov.cl", + "gob.cl", + "co.cl", + "mil.cl", + "cm", + "co.cm", + "com.cm", + "gov.cm", + "net.cm", + "cn", + "ac.cn", + "com.cn", + "edu.cn", + "gov.cn", + "net.cn", + "org.cn", + "mil.cn", + "xn--55qx5d.cn", + "xn--io0a7i.cn", + "xn--od0alg.cn", + "ah.cn", + "bj.cn", + "cq.cn", + "fj.cn", + "gd.cn", + "gs.cn", + "gz.cn", + "gx.cn", + "ha.cn", + "hb.cn", + "he.cn", + "hi.cn", + "hl.cn", + "hn.cn", + "jl.cn", + "js.cn", + "jx.cn", + "ln.cn", + "nm.cn", + "nx.cn", + "qh.cn", + "sc.cn", + "sd.cn", + "sh.cn", + "sn.cn", + "sx.cn", + "tj.cn", + "xj.cn", + "xz.cn", + "yn.cn", + "zj.cn", + "hk.cn", + "mo.cn", + "tw.cn", + "co", + "arts.co", + "com.co", + "edu.co", + "firm.co", + "gov.co", + "info.co", + "int.co", + "mil.co", + "net.co", + "nom.co", + "org.co", + "rec.co", + "web.co", + 
"com", + "coop", + "cr", + "ac.cr", + "co.cr", + "ed.cr", + "fi.cr", + "go.cr", + "or.cr", + "sa.cr", + "cu", + "com.cu", + "edu.cu", + "org.cu", + "net.cu", + "gov.cu", + "inf.cu", + "cv", + "cw", + "com.cw", + "edu.cw", + "net.cw", + "org.cw", + "cx", + "gov.cx", + "ac.cy", + "biz.cy", + "com.cy", + "ekloges.cy", + "gov.cy", + "ltd.cy", + "name.cy", + "net.cy", + "org.cy", + "parliament.cy", + "press.cy", + "pro.cy", + "tm.cy", + "cz", + "de", + "dj", + "dk", + "dm", + "com.dm", + "net.dm", + "org.dm", + "edu.dm", + "gov.dm", + "do", + "art.do", + "com.do", + "edu.do", + "gob.do", + "gov.do", + "mil.do", + "net.do", + "org.do", + "sld.do", + "web.do", + "dz", + "com.dz", + "org.dz", + "net.dz", + "gov.dz", + "edu.dz", + "asso.dz", + "pol.dz", + "art.dz", + "ec", + "com.ec", + "info.ec", + "net.ec", + "fin.ec", + "k12.ec", + "med.ec", + "pro.ec", + "org.ec", + "edu.ec", + "gov.ec", + "gob.ec", + "mil.ec", + "edu", + "ee", + "edu.ee", + "gov.ee", + "riik.ee", + "lib.ee", + "med.ee", + "com.ee", + "pri.ee", + "aip.ee", + "org.ee", + "fie.ee", + "eg", + "com.eg", + "edu.eg", + "eun.eg", + "gov.eg", + "mil.eg", + "name.eg", + "net.eg", + "org.eg", + "sci.eg", + "*.er", + "es", + "com.es", + "nom.es", + "org.es", + "gob.es", + "edu.es", + "et", + "com.et", + "gov.et", + "org.et", + "edu.et", + "biz.et", + "name.et", + "info.et", + "net.et", + "eu", + "fi", + "aland.fi", + "*.fj", + "*.fk", + "fm", + "fo", + "fr", + "com.fr", + "asso.fr", + "nom.fr", + "prd.fr", + "presse.fr", + "tm.fr", + "aeroport.fr", + "assedic.fr", + "avocat.fr", + "avoues.fr", + "cci.fr", + "chambagri.fr", + "chirurgiens-dentistes.fr", + "experts-comptables.fr", + "geometre-expert.fr", + "gouv.fr", + "greta.fr", + "huissier-justice.fr", + "medecin.fr", + "notaires.fr", + "pharmacien.fr", + "port.fr", + "veterinaire.fr", + "ga", + "gb", + "gd", + "ge", + "com.ge", + "edu.ge", + "gov.ge", + "org.ge", + "mil.ge", + "net.ge", + "pvt.ge", + "gf", + "gg", + "co.gg", + "net.gg", + "org.gg", + "gh", + "com.gh", + "edu.gh", + "gov.gh", + "org.gh", + "mil.gh", + "gi", + "com.gi", + "ltd.gi", + "gov.gi", + "mod.gi", + "edu.gi", + "org.gi", + "gl", + "co.gl", + "com.gl", + "edu.gl", + "net.gl", + "org.gl", + "gm", + "gn", + "ac.gn", + "com.gn", + "edu.gn", + "gov.gn", + "org.gn", + "net.gn", + "gov", + "gp", + "com.gp", + "net.gp", + "mobi.gp", + "edu.gp", + "org.gp", + "asso.gp", + "gq", + "gr", + "com.gr", + "edu.gr", + "net.gr", + "org.gr", + "gov.gr", + "gs", + "gt", + "com.gt", + "edu.gt", + "gob.gt", + "ind.gt", + "mil.gt", + "net.gt", + "org.gt", + "*.gu", + "gw", + "gy", + "co.gy", + "com.gy", + "edu.gy", + "gov.gy", + "net.gy", + "org.gy", + "hk", + "com.hk", + "edu.hk", + "gov.hk", + "idv.hk", + "net.hk", + "org.hk", + "xn--55qx5d.hk", + "xn--wcvs22d.hk", + "xn--lcvr32d.hk", + "xn--mxtq1m.hk", + "xn--gmqw5a.hk", + "xn--ciqpn.hk", + "xn--gmq050i.hk", + "xn--zf0avx.hk", + "xn--io0a7i.hk", + "xn--mk0axi.hk", + "xn--od0alg.hk", + "xn--od0aq3b.hk", + "xn--tn0ag.hk", + "xn--uc0atv.hk", + "xn--uc0ay4a.hk", + "hm", + "hn", + "com.hn", + "edu.hn", + "org.hn", + "net.hn", + "mil.hn", + "gob.hn", + "hr", + "iz.hr", + "from.hr", + "name.hr", + "com.hr", + "ht", + "com.ht", + "shop.ht", + "firm.ht", + "info.ht", + "adult.ht", + "net.ht", + "pro.ht", + "org.ht", + "med.ht", + "art.ht", + "coop.ht", + "pol.ht", + "asso.ht", + "edu.ht", + "rel.ht", + "gouv.ht", + "perso.ht", + "hu", + "co.hu", + "info.hu", + "org.hu", + "priv.hu", + "sport.hu", + "tm.hu", + "2000.hu", + "agrar.hu", + "bolt.hu", + "casino.hu", + "city.hu", + "erotica.hu", + 
"erotika.hu", + "film.hu", + "forum.hu", + "games.hu", + "hotel.hu", + "ingatlan.hu", + "jogasz.hu", + "konyvelo.hu", + "lakas.hu", + "media.hu", + "news.hu", + "reklam.hu", + "sex.hu", + "shop.hu", + "suli.hu", + "szex.hu", + "tozsde.hu", + "utazas.hu", + "video.hu", + "id", + "ac.id", + "biz.id", + "co.id", + "desa.id", + "go.id", + "mil.id", + "my.id", + "net.id", + "or.id", + "sch.id", + "web.id", + "ie", + "gov.ie", + "il", + "ac.il", + "co.il", + "gov.il", + "idf.il", + "k12.il", + "muni.il", + "net.il", + "org.il", + "im", + "ac.im", + "co.im", + "com.im", + "ltd.co.im", + "net.im", + "org.im", + "plc.co.im", + "tt.im", + "tv.im", + "in", + "co.in", + "firm.in", + "net.in", + "org.in", + "gen.in", + "ind.in", + "nic.in", + "ac.in", + "edu.in", + "res.in", + "gov.in", + "mil.in", + "info", + "int", + "eu.int", + "io", + "com.io", + "iq", + "gov.iq", + "edu.iq", + "mil.iq", + "com.iq", + "org.iq", + "net.iq", + "ir", + "ac.ir", + "co.ir", + "gov.ir", + "id.ir", + "net.ir", + "org.ir", + "sch.ir", + "xn--mgba3a4f16a.ir", + "xn--mgba3a4fra.ir", + "is", + "net.is", + "com.is", + "edu.is", + "gov.is", + "org.is", + "int.is", + "it", + "gov.it", + "edu.it", + "abr.it", + "abruzzo.it", + "aosta-valley.it", + "aostavalley.it", + "bas.it", + "basilicata.it", + "cal.it", + "calabria.it", + "cam.it", + "campania.it", + "emilia-romagna.it", + "emiliaromagna.it", + "emr.it", + "friuli-v-giulia.it", + "friuli-ve-giulia.it", + "friuli-vegiulia.it", + "friuli-venezia-giulia.it", + "friuli-veneziagiulia.it", + "friuli-vgiulia.it", + "friuliv-giulia.it", + "friulive-giulia.it", + "friulivegiulia.it", + "friulivenezia-giulia.it", + "friuliveneziagiulia.it", + "friulivgiulia.it", + "fvg.it", + "laz.it", + "lazio.it", + "lig.it", + "liguria.it", + "lom.it", + "lombardia.it", + "lombardy.it", + "lucania.it", + "mar.it", + "marche.it", + "mol.it", + "molise.it", + "piedmont.it", + "piemonte.it", + "pmn.it", + "pug.it", + "puglia.it", + "sar.it", + "sardegna.it", + "sardinia.it", + "sic.it", + "sicilia.it", + "sicily.it", + "taa.it", + "tos.it", + "toscana.it", + "trentino-a-adige.it", + "trentino-aadige.it", + "trentino-alto-adige.it", + "trentino-altoadige.it", + "trentino-s-tirol.it", + "trentino-stirol.it", + "trentino-sud-tirol.it", + "trentino-sudtirol.it", + "trentino-sued-tirol.it", + "trentino-suedtirol.it", + "trentinoa-adige.it", + "trentinoaadige.it", + "trentinoalto-adige.it", + "trentinoaltoadige.it", + "trentinos-tirol.it", + "trentinostirol.it", + "trentinosud-tirol.it", + "trentinosudtirol.it", + "trentinosued-tirol.it", + "trentinosuedtirol.it", + "tuscany.it", + "umb.it", + "umbria.it", + "val-d-aosta.it", + "val-daosta.it", + "vald-aosta.it", + "valdaosta.it", + "valle-aosta.it", + "valle-d-aosta.it", + "valle-daosta.it", + "valleaosta.it", + "valled-aosta.it", + "valledaosta.it", + "vallee-aoste.it", + "valleeaoste.it", + "vao.it", + "vda.it", + "ven.it", + "veneto.it", + "ag.it", + "agrigento.it", + "al.it", + "alessandria.it", + "alto-adige.it", + "altoadige.it", + "an.it", + "ancona.it", + "andria-barletta-trani.it", + "andria-trani-barletta.it", + "andriabarlettatrani.it", + "andriatranibarletta.it", + "ao.it", + "aosta.it", + "aoste.it", + "ap.it", + "aq.it", + "aquila.it", + "ar.it", + "arezzo.it", + "ascoli-piceno.it", + "ascolipiceno.it", + "asti.it", + "at.it", + "av.it", + "avellino.it", + "ba.it", + "balsan.it", + "bari.it", + "barletta-trani-andria.it", + "barlettatraniandria.it", + "belluno.it", + "benevento.it", + "bergamo.it", + "bg.it", + "bi.it", + "biella.it", + 
"bl.it", + "bn.it", + "bo.it", + "bologna.it", + "bolzano.it", + "bozen.it", + "br.it", + "brescia.it", + "brindisi.it", + "bs.it", + "bt.it", + "bz.it", + "ca.it", + "cagliari.it", + "caltanissetta.it", + "campidano-medio.it", + "campidanomedio.it", + "campobasso.it", + "carbonia-iglesias.it", + "carboniaiglesias.it", + "carrara-massa.it", + "carraramassa.it", + "caserta.it", + "catania.it", + "catanzaro.it", + "cb.it", + "ce.it", + "cesena-forli.it", + "cesenaforli.it", + "ch.it", + "chieti.it", + "ci.it", + "cl.it", + "cn.it", + "co.it", + "como.it", + "cosenza.it", + "cr.it", + "cremona.it", + "crotone.it", + "cs.it", + "ct.it", + "cuneo.it", + "cz.it", + "dell-ogliastra.it", + "dellogliastra.it", + "en.it", + "enna.it", + "fc.it", + "fe.it", + "fermo.it", + "ferrara.it", + "fg.it", + "fi.it", + "firenze.it", + "florence.it", + "fm.it", + "foggia.it", + "forli-cesena.it", + "forlicesena.it", + "fr.it", + "frosinone.it", + "ge.it", + "genoa.it", + "genova.it", + "go.it", + "gorizia.it", + "gr.it", + "grosseto.it", + "iglesias-carbonia.it", + "iglesiascarbonia.it", + "im.it", + "imperia.it", + "is.it", + "isernia.it", + "kr.it", + "la-spezia.it", + "laquila.it", + "laspezia.it", + "latina.it", + "lc.it", + "le.it", + "lecce.it", + "lecco.it", + "li.it", + "livorno.it", + "lo.it", + "lodi.it", + "lt.it", + "lu.it", + "lucca.it", + "macerata.it", + "mantova.it", + "massa-carrara.it", + "massacarrara.it", + "matera.it", + "mb.it", + "mc.it", + "me.it", + "medio-campidano.it", + "mediocampidano.it", + "messina.it", + "mi.it", + "milan.it", + "milano.it", + "mn.it", + "mo.it", + "modena.it", + "monza-brianza.it", + "monza-e-della-brianza.it", + "monza.it", + "monzabrianza.it", + "monzaebrianza.it", + "monzaedellabrianza.it", + "ms.it", + "mt.it", + "na.it", + "naples.it", + "napoli.it", + "no.it", + "novara.it", + "nu.it", + "nuoro.it", + "og.it", + "ogliastra.it", + "olbia-tempio.it", + "olbiatempio.it", + "or.it", + "oristano.it", + "ot.it", + "pa.it", + "padova.it", + "padua.it", + "palermo.it", + "parma.it", + "pavia.it", + "pc.it", + "pd.it", + "pe.it", + "perugia.it", + "pesaro-urbino.it", + "pesarourbino.it", + "pescara.it", + "pg.it", + "pi.it", + "piacenza.it", + "pisa.it", + "pistoia.it", + "pn.it", + "po.it", + "pordenone.it", + "potenza.it", + "pr.it", + "prato.it", + "pt.it", + "pu.it", + "pv.it", + "pz.it", + "ra.it", + "ragusa.it", + "ravenna.it", + "rc.it", + "re.it", + "reggio-calabria.it", + "reggio-emilia.it", + "reggiocalabria.it", + "reggioemilia.it", + "rg.it", + "ri.it", + "rieti.it", + "rimini.it", + "rm.it", + "rn.it", + "ro.it", + "roma.it", + "rome.it", + "rovigo.it", + "sa.it", + "salerno.it", + "sassari.it", + "savona.it", + "si.it", + "siena.it", + "siracusa.it", + "so.it", + "sondrio.it", + "sp.it", + "sr.it", + "ss.it", + "suedtirol.it", + "sv.it", + "ta.it", + "taranto.it", + "te.it", + "tempio-olbia.it", + "tempioolbia.it", + "teramo.it", + "terni.it", + "tn.it", + "to.it", + "torino.it", + "tp.it", + "tr.it", + "trani-andria-barletta.it", + "trani-barletta-andria.it", + "traniandriabarletta.it", + "tranibarlettaandria.it", + "trapani.it", + "trentino.it", + "trento.it", + "treviso.it", + "trieste.it", + "ts.it", + "turin.it", + "tv.it", + "ud.it", + "udine.it", + "urbino-pesaro.it", + "urbinopesaro.it", + "va.it", + "varese.it", + "vb.it", + "vc.it", + "ve.it", + "venezia.it", + "venice.it", + "verbania.it", + "vercelli.it", + "verona.it", + "vi.it", + "vibo-valentia.it", + "vibovalentia.it", + "vicenza.it", + "viterbo.it", + "vr.it", + "vs.it", + "vt.it", + 
"vv.it", + "je", + "co.je", + "net.je", + "org.je", + "*.jm", + "jo", + "com.jo", + "org.jo", + "net.jo", + "edu.jo", + "sch.jo", + "gov.jo", + "mil.jo", + "name.jo", + "jobs", + "jp", + "ac.jp", + "ad.jp", + "co.jp", + "ed.jp", + "go.jp", + "gr.jp", + "lg.jp", + "ne.jp", + "or.jp", + "aichi.jp", + "akita.jp", + "aomori.jp", + "chiba.jp", + "ehime.jp", + "fukui.jp", + "fukuoka.jp", + "fukushima.jp", + "gifu.jp", + "gunma.jp", + "hiroshima.jp", + "hokkaido.jp", + "hyogo.jp", + "ibaraki.jp", + "ishikawa.jp", + "iwate.jp", + "kagawa.jp", + "kagoshima.jp", + "kanagawa.jp", + "kochi.jp", + "kumamoto.jp", + "kyoto.jp", + "mie.jp", + "miyagi.jp", + "miyazaki.jp", + "nagano.jp", + "nagasaki.jp", + "nara.jp", + "niigata.jp", + "oita.jp", + "okayama.jp", + "okinawa.jp", + "osaka.jp", + "saga.jp", + "saitama.jp", + "shiga.jp", + "shimane.jp", + "shizuoka.jp", + "tochigi.jp", + "tokushima.jp", + "tokyo.jp", + "tottori.jp", + "toyama.jp", + "wakayama.jp", + "yamagata.jp", + "yamaguchi.jp", + "yamanashi.jp", + "xn--4pvxs.jp", + "xn--vgu402c.jp", + "xn--c3s14m.jp", + "xn--f6qx53a.jp", + "xn--8pvr4u.jp", + "xn--uist22h.jp", + "xn--djrs72d6uy.jp", + "xn--mkru45i.jp", + "xn--0trq7p7nn.jp", + "xn--8ltr62k.jp", + "xn--2m4a15e.jp", + "xn--efvn9s.jp", + "xn--32vp30h.jp", + "xn--4it797k.jp", + "xn--1lqs71d.jp", + "xn--5rtp49c.jp", + "xn--5js045d.jp", + "xn--ehqz56n.jp", + "xn--1lqs03n.jp", + "xn--qqqt11m.jp", + "xn--kbrq7o.jp", + "xn--pssu33l.jp", + "xn--ntsq17g.jp", + "xn--uisz3g.jp", + "xn--6btw5a.jp", + "xn--1ctwo.jp", + "xn--6orx2r.jp", + "xn--rht61e.jp", + "xn--rht27z.jp", + "xn--djty4k.jp", + "xn--nit225k.jp", + "xn--rht3d.jp", + "xn--klty5x.jp", + "xn--kltx9a.jp", + "xn--kltp7d.jp", + "xn--uuwu58a.jp", + "xn--zbx025d.jp", + "xn--ntso0iqx3a.jp", + "xn--elqq16h.jp", + "xn--4it168d.jp", + "xn--klt787d.jp", + "xn--rny31h.jp", + "xn--7t0a264c.jp", + "xn--5rtq34k.jp", + "xn--k7yn95e.jp", + "xn--tor131o.jp", + "xn--d5qv7z876c.jp", + "*.kawasaki.jp", + "*.kitakyushu.jp", + "*.kobe.jp", + "*.nagoya.jp", + "*.sapporo.jp", + "*.sendai.jp", + "*.yokohama.jp", + "!city.kawasaki.jp", + "!city.kitakyushu.jp", + "!city.kobe.jp", + "!city.nagoya.jp", + "!city.sapporo.jp", + "!city.sendai.jp", + "!city.yokohama.jp", + "aisai.aichi.jp", + "ama.aichi.jp", + "anjo.aichi.jp", + "asuke.aichi.jp", + "chiryu.aichi.jp", + "chita.aichi.jp", + "fuso.aichi.jp", + "gamagori.aichi.jp", + "handa.aichi.jp", + "hazu.aichi.jp", + "hekinan.aichi.jp", + "higashiura.aichi.jp", + "ichinomiya.aichi.jp", + "inazawa.aichi.jp", + "inuyama.aichi.jp", + "isshiki.aichi.jp", + "iwakura.aichi.jp", + "kanie.aichi.jp", + "kariya.aichi.jp", + "kasugai.aichi.jp", + "kira.aichi.jp", + "kiyosu.aichi.jp", + "komaki.aichi.jp", + "konan.aichi.jp", + "kota.aichi.jp", + "mihama.aichi.jp", + "miyoshi.aichi.jp", + "nishio.aichi.jp", + "nisshin.aichi.jp", + "obu.aichi.jp", + "oguchi.aichi.jp", + "oharu.aichi.jp", + "okazaki.aichi.jp", + "owariasahi.aichi.jp", + "seto.aichi.jp", + "shikatsu.aichi.jp", + "shinshiro.aichi.jp", + "shitara.aichi.jp", + "tahara.aichi.jp", + "takahama.aichi.jp", + "tobishima.aichi.jp", + "toei.aichi.jp", + "togo.aichi.jp", + "tokai.aichi.jp", + "tokoname.aichi.jp", + "toyoake.aichi.jp", + "toyohashi.aichi.jp", + "toyokawa.aichi.jp", + "toyone.aichi.jp", + "toyota.aichi.jp", + "tsushima.aichi.jp", + "yatomi.aichi.jp", + "akita.akita.jp", + "daisen.akita.jp", + "fujisato.akita.jp", + "gojome.akita.jp", + "hachirogata.akita.jp", + "happou.akita.jp", + "higashinaruse.akita.jp", + "honjo.akita.jp", + "honjyo.akita.jp", + "ikawa.akita.jp", + 
"kamikoani.akita.jp", + "kamioka.akita.jp", + "katagami.akita.jp", + "kazuno.akita.jp", + "kitaakita.akita.jp", + "kosaka.akita.jp", + "kyowa.akita.jp", + "misato.akita.jp", + "mitane.akita.jp", + "moriyoshi.akita.jp", + "nikaho.akita.jp", + "noshiro.akita.jp", + "odate.akita.jp", + "oga.akita.jp", + "ogata.akita.jp", + "semboku.akita.jp", + "yokote.akita.jp", + "yurihonjo.akita.jp", + "aomori.aomori.jp", + "gonohe.aomori.jp", + "hachinohe.aomori.jp", + "hashikami.aomori.jp", + "hiranai.aomori.jp", + "hirosaki.aomori.jp", + "itayanagi.aomori.jp", + "kuroishi.aomori.jp", + "misawa.aomori.jp", + "mutsu.aomori.jp", + "nakadomari.aomori.jp", + "noheji.aomori.jp", + "oirase.aomori.jp", + "owani.aomori.jp", + "rokunohe.aomori.jp", + "sannohe.aomori.jp", + "shichinohe.aomori.jp", + "shingo.aomori.jp", + "takko.aomori.jp", + "towada.aomori.jp", + "tsugaru.aomori.jp", + "tsuruta.aomori.jp", + "abiko.chiba.jp", + "asahi.chiba.jp", + "chonan.chiba.jp", + "chosei.chiba.jp", + "choshi.chiba.jp", + "chuo.chiba.jp", + "funabashi.chiba.jp", + "futtsu.chiba.jp", + "hanamigawa.chiba.jp", + "ichihara.chiba.jp", + "ichikawa.chiba.jp", + "ichinomiya.chiba.jp", + "inzai.chiba.jp", + "isumi.chiba.jp", + "kamagaya.chiba.jp", + "kamogawa.chiba.jp", + "kashiwa.chiba.jp", + "katori.chiba.jp", + "katsuura.chiba.jp", + "kimitsu.chiba.jp", + "kisarazu.chiba.jp", + "kozaki.chiba.jp", + "kujukuri.chiba.jp", + "kyonan.chiba.jp", + "matsudo.chiba.jp", + "midori.chiba.jp", + "mihama.chiba.jp", + "minamiboso.chiba.jp", + "mobara.chiba.jp", + "mutsuzawa.chiba.jp", + "nagara.chiba.jp", + "nagareyama.chiba.jp", + "narashino.chiba.jp", + "narita.chiba.jp", + "noda.chiba.jp", + "oamishirasato.chiba.jp", + "omigawa.chiba.jp", + "onjuku.chiba.jp", + "otaki.chiba.jp", + "sakae.chiba.jp", + "sakura.chiba.jp", + "shimofusa.chiba.jp", + "shirako.chiba.jp", + "shiroi.chiba.jp", + "shisui.chiba.jp", + "sodegaura.chiba.jp", + "sosa.chiba.jp", + "tako.chiba.jp", + "tateyama.chiba.jp", + "togane.chiba.jp", + "tohnosho.chiba.jp", + "tomisato.chiba.jp", + "urayasu.chiba.jp", + "yachimata.chiba.jp", + "yachiyo.chiba.jp", + "yokaichiba.chiba.jp", + "yokoshibahikari.chiba.jp", + "yotsukaido.chiba.jp", + "ainan.ehime.jp", + "honai.ehime.jp", + "ikata.ehime.jp", + "imabari.ehime.jp", + "iyo.ehime.jp", + "kamijima.ehime.jp", + "kihoku.ehime.jp", + "kumakogen.ehime.jp", + "masaki.ehime.jp", + "matsuno.ehime.jp", + "matsuyama.ehime.jp", + "namikata.ehime.jp", + "niihama.ehime.jp", + "ozu.ehime.jp", + "saijo.ehime.jp", + "seiyo.ehime.jp", + "shikokuchuo.ehime.jp", + "tobe.ehime.jp", + "toon.ehime.jp", + "uchiko.ehime.jp", + "uwajima.ehime.jp", + "yawatahama.ehime.jp", + "echizen.fukui.jp", + "eiheiji.fukui.jp", + "fukui.fukui.jp", + "ikeda.fukui.jp", + "katsuyama.fukui.jp", + "mihama.fukui.jp", + "minamiechizen.fukui.jp", + "obama.fukui.jp", + "ohi.fukui.jp", + "ono.fukui.jp", + "sabae.fukui.jp", + "sakai.fukui.jp", + "takahama.fukui.jp", + "tsuruga.fukui.jp", + "wakasa.fukui.jp", + "ashiya.fukuoka.jp", + "buzen.fukuoka.jp", + "chikugo.fukuoka.jp", + "chikuho.fukuoka.jp", + "chikujo.fukuoka.jp", + "chikushino.fukuoka.jp", + "chikuzen.fukuoka.jp", + "chuo.fukuoka.jp", + "dazaifu.fukuoka.jp", + "fukuchi.fukuoka.jp", + "hakata.fukuoka.jp", + "higashi.fukuoka.jp", + "hirokawa.fukuoka.jp", + "hisayama.fukuoka.jp", + "iizuka.fukuoka.jp", + "inatsuki.fukuoka.jp", + "kaho.fukuoka.jp", + "kasuga.fukuoka.jp", + "kasuya.fukuoka.jp", + "kawara.fukuoka.jp", + "keisen.fukuoka.jp", + "koga.fukuoka.jp", + "kurate.fukuoka.jp", + "kurogi.fukuoka.jp", + 
"kurume.fukuoka.jp", + "minami.fukuoka.jp", + "miyako.fukuoka.jp", + "miyama.fukuoka.jp", + "miyawaka.fukuoka.jp", + "mizumaki.fukuoka.jp", + "munakata.fukuoka.jp", + "nakagawa.fukuoka.jp", + "nakama.fukuoka.jp", + "nishi.fukuoka.jp", + "nogata.fukuoka.jp", + "ogori.fukuoka.jp", + "okagaki.fukuoka.jp", + "okawa.fukuoka.jp", + "oki.fukuoka.jp", + "omuta.fukuoka.jp", + "onga.fukuoka.jp", + "onojo.fukuoka.jp", + "oto.fukuoka.jp", + "saigawa.fukuoka.jp", + "sasaguri.fukuoka.jp", + "shingu.fukuoka.jp", + "shinyoshitomi.fukuoka.jp", + "shonai.fukuoka.jp", + "soeda.fukuoka.jp", + "sue.fukuoka.jp", + "tachiarai.fukuoka.jp", + "tagawa.fukuoka.jp", + "takata.fukuoka.jp", + "toho.fukuoka.jp", + "toyotsu.fukuoka.jp", + "tsuiki.fukuoka.jp", + "ukiha.fukuoka.jp", + "umi.fukuoka.jp", + "usui.fukuoka.jp", + "yamada.fukuoka.jp", + "yame.fukuoka.jp", + "yanagawa.fukuoka.jp", + "yukuhashi.fukuoka.jp", + "aizubange.fukushima.jp", + "aizumisato.fukushima.jp", + "aizuwakamatsu.fukushima.jp", + "asakawa.fukushima.jp", + "bandai.fukushima.jp", + "date.fukushima.jp", + "fukushima.fukushima.jp", + "furudono.fukushima.jp", + "futaba.fukushima.jp", + "hanawa.fukushima.jp", + "higashi.fukushima.jp", + "hirata.fukushima.jp", + "hirono.fukushima.jp", + "iitate.fukushima.jp", + "inawashiro.fukushima.jp", + "ishikawa.fukushima.jp", + "iwaki.fukushima.jp", + "izumizaki.fukushima.jp", + "kagamiishi.fukushima.jp", + "kaneyama.fukushima.jp", + "kawamata.fukushima.jp", + "kitakata.fukushima.jp", + "kitashiobara.fukushima.jp", + "koori.fukushima.jp", + "koriyama.fukushima.jp", + "kunimi.fukushima.jp", + "miharu.fukushima.jp", + "mishima.fukushima.jp", + "namie.fukushima.jp", + "nango.fukushima.jp", + "nishiaizu.fukushima.jp", + "nishigo.fukushima.jp", + "okuma.fukushima.jp", + "omotego.fukushima.jp", + "ono.fukushima.jp", + "otama.fukushima.jp", + "samegawa.fukushima.jp", + "shimogo.fukushima.jp", + "shirakawa.fukushima.jp", + "showa.fukushima.jp", + "soma.fukushima.jp", + "sukagawa.fukushima.jp", + "taishin.fukushima.jp", + "tamakawa.fukushima.jp", + "tanagura.fukushima.jp", + "tenei.fukushima.jp", + "yabuki.fukushima.jp", + "yamato.fukushima.jp", + "yamatsuri.fukushima.jp", + "yanaizu.fukushima.jp", + "yugawa.fukushima.jp", + "anpachi.gifu.jp", + "ena.gifu.jp", + "gifu.gifu.jp", + "ginan.gifu.jp", + "godo.gifu.jp", + "gujo.gifu.jp", + "hashima.gifu.jp", + "hichiso.gifu.jp", + "hida.gifu.jp", + "higashishirakawa.gifu.jp", + "ibigawa.gifu.jp", + "ikeda.gifu.jp", + "kakamigahara.gifu.jp", + "kani.gifu.jp", + "kasahara.gifu.jp", + "kasamatsu.gifu.jp", + "kawaue.gifu.jp", + "kitagata.gifu.jp", + "mino.gifu.jp", + "minokamo.gifu.jp", + "mitake.gifu.jp", + "mizunami.gifu.jp", + "motosu.gifu.jp", + "nakatsugawa.gifu.jp", + "ogaki.gifu.jp", + "sakahogi.gifu.jp", + "seki.gifu.jp", + "sekigahara.gifu.jp", + "shirakawa.gifu.jp", + "tajimi.gifu.jp", + "takayama.gifu.jp", + "tarui.gifu.jp", + "toki.gifu.jp", + "tomika.gifu.jp", + "wanouchi.gifu.jp", + "yamagata.gifu.jp", + "yaotsu.gifu.jp", + "yoro.gifu.jp", + "annaka.gunma.jp", + "chiyoda.gunma.jp", + "fujioka.gunma.jp", + "higashiagatsuma.gunma.jp", + "isesaki.gunma.jp", + "itakura.gunma.jp", + "kanna.gunma.jp", + "kanra.gunma.jp", + "katashina.gunma.jp", + "kawaba.gunma.jp", + "kiryu.gunma.jp", + "kusatsu.gunma.jp", + "maebashi.gunma.jp", + "meiwa.gunma.jp", + "midori.gunma.jp", + "minakami.gunma.jp", + "naganohara.gunma.jp", + "nakanojo.gunma.jp", + "nanmoku.gunma.jp", + "numata.gunma.jp", + "oizumi.gunma.jp", + "ora.gunma.jp", + "ota.gunma.jp", + "shibukawa.gunma.jp", + 
"shimonita.gunma.jp", + "shinto.gunma.jp", + "showa.gunma.jp", + "takasaki.gunma.jp", + "takayama.gunma.jp", + "tamamura.gunma.jp", + "tatebayashi.gunma.jp", + "tomioka.gunma.jp", + "tsukiyono.gunma.jp", + "tsumagoi.gunma.jp", + "ueno.gunma.jp", + "yoshioka.gunma.jp", + "asaminami.hiroshima.jp", + "daiwa.hiroshima.jp", + "etajima.hiroshima.jp", + "fuchu.hiroshima.jp", + "fukuyama.hiroshima.jp", + "hatsukaichi.hiroshima.jp", + "higashihiroshima.hiroshima.jp", + "hongo.hiroshima.jp", + "jinsekikogen.hiroshima.jp", + "kaita.hiroshima.jp", + "kui.hiroshima.jp", + "kumano.hiroshima.jp", + "kure.hiroshima.jp", + "mihara.hiroshima.jp", + "miyoshi.hiroshima.jp", + "naka.hiroshima.jp", + "onomichi.hiroshima.jp", + "osakikamijima.hiroshima.jp", + "otake.hiroshima.jp", + "saka.hiroshima.jp", + "sera.hiroshima.jp", + "seranishi.hiroshima.jp", + "shinichi.hiroshima.jp", + "shobara.hiroshima.jp", + "takehara.hiroshima.jp", + "abashiri.hokkaido.jp", + "abira.hokkaido.jp", + "aibetsu.hokkaido.jp", + "akabira.hokkaido.jp", + "akkeshi.hokkaido.jp", + "asahikawa.hokkaido.jp", + "ashibetsu.hokkaido.jp", + "ashoro.hokkaido.jp", + "assabu.hokkaido.jp", + "atsuma.hokkaido.jp", + "bibai.hokkaido.jp", + "biei.hokkaido.jp", + "bifuka.hokkaido.jp", + "bihoro.hokkaido.jp", + "biratori.hokkaido.jp", + "chippubetsu.hokkaido.jp", + "chitose.hokkaido.jp", + "date.hokkaido.jp", + "ebetsu.hokkaido.jp", + "embetsu.hokkaido.jp", + "eniwa.hokkaido.jp", + "erimo.hokkaido.jp", + "esan.hokkaido.jp", + "esashi.hokkaido.jp", + "fukagawa.hokkaido.jp", + "fukushima.hokkaido.jp", + "furano.hokkaido.jp", + "furubira.hokkaido.jp", + "haboro.hokkaido.jp", + "hakodate.hokkaido.jp", + "hamatonbetsu.hokkaido.jp", + "hidaka.hokkaido.jp", + "higashikagura.hokkaido.jp", + "higashikawa.hokkaido.jp", + "hiroo.hokkaido.jp", + "hokuryu.hokkaido.jp", + "hokuto.hokkaido.jp", + "honbetsu.hokkaido.jp", + "horokanai.hokkaido.jp", + "horonobe.hokkaido.jp", + "ikeda.hokkaido.jp", + "imakane.hokkaido.jp", + "ishikari.hokkaido.jp", + "iwamizawa.hokkaido.jp", + "iwanai.hokkaido.jp", + "kamifurano.hokkaido.jp", + "kamikawa.hokkaido.jp", + "kamishihoro.hokkaido.jp", + "kamisunagawa.hokkaido.jp", + "kamoenai.hokkaido.jp", + "kayabe.hokkaido.jp", + "kembuchi.hokkaido.jp", + "kikonai.hokkaido.jp", + "kimobetsu.hokkaido.jp", + "kitahiroshima.hokkaido.jp", + "kitami.hokkaido.jp", + "kiyosato.hokkaido.jp", + "koshimizu.hokkaido.jp", + "kunneppu.hokkaido.jp", + "kuriyama.hokkaido.jp", + "kuromatsunai.hokkaido.jp", + "kushiro.hokkaido.jp", + "kutchan.hokkaido.jp", + "kyowa.hokkaido.jp", + "mashike.hokkaido.jp", + "matsumae.hokkaido.jp", + "mikasa.hokkaido.jp", + "minamifurano.hokkaido.jp", + "mombetsu.hokkaido.jp", + "moseushi.hokkaido.jp", + "mukawa.hokkaido.jp", + "muroran.hokkaido.jp", + "naie.hokkaido.jp", + "nakagawa.hokkaido.jp", + "nakasatsunai.hokkaido.jp", + "nakatombetsu.hokkaido.jp", + "nanae.hokkaido.jp", + "nanporo.hokkaido.jp", + "nayoro.hokkaido.jp", + "nemuro.hokkaido.jp", + "niikappu.hokkaido.jp", + "niki.hokkaido.jp", + "nishiokoppe.hokkaido.jp", + "noboribetsu.hokkaido.jp", + "numata.hokkaido.jp", + "obihiro.hokkaido.jp", + "obira.hokkaido.jp", + "oketo.hokkaido.jp", + "okoppe.hokkaido.jp", + "otaru.hokkaido.jp", + "otobe.hokkaido.jp", + "otofuke.hokkaido.jp", + "otoineppu.hokkaido.jp", + "oumu.hokkaido.jp", + "ozora.hokkaido.jp", + "pippu.hokkaido.jp", + "rankoshi.hokkaido.jp", + "rebun.hokkaido.jp", + "rikubetsu.hokkaido.jp", + "rishiri.hokkaido.jp", + "rishirifuji.hokkaido.jp", + "saroma.hokkaido.jp", + "sarufutsu.hokkaido.jp", + 
"shakotan.hokkaido.jp", + "shari.hokkaido.jp", + "shibecha.hokkaido.jp", + "shibetsu.hokkaido.jp", + "shikabe.hokkaido.jp", + "shikaoi.hokkaido.jp", + "shimamaki.hokkaido.jp", + "shimizu.hokkaido.jp", + "shimokawa.hokkaido.jp", + "shinshinotsu.hokkaido.jp", + "shintoku.hokkaido.jp", + "shiranuka.hokkaido.jp", + "shiraoi.hokkaido.jp", + "shiriuchi.hokkaido.jp", + "sobetsu.hokkaido.jp", + "sunagawa.hokkaido.jp", + "taiki.hokkaido.jp", + "takasu.hokkaido.jp", + "takikawa.hokkaido.jp", + "takinoue.hokkaido.jp", + "teshikaga.hokkaido.jp", + "tobetsu.hokkaido.jp", + "tohma.hokkaido.jp", + "tomakomai.hokkaido.jp", + "tomari.hokkaido.jp", + "toya.hokkaido.jp", + "toyako.hokkaido.jp", + "toyotomi.hokkaido.jp", + "toyoura.hokkaido.jp", + "tsubetsu.hokkaido.jp", + "tsukigata.hokkaido.jp", + "urakawa.hokkaido.jp", + "urausu.hokkaido.jp", + "uryu.hokkaido.jp", + "utashinai.hokkaido.jp", + "wakkanai.hokkaido.jp", + "wassamu.hokkaido.jp", + "yakumo.hokkaido.jp", + "yoichi.hokkaido.jp", + "aioi.hyogo.jp", + "akashi.hyogo.jp", + "ako.hyogo.jp", + "amagasaki.hyogo.jp", + "aogaki.hyogo.jp", + "asago.hyogo.jp", + "ashiya.hyogo.jp", + "awaji.hyogo.jp", + "fukusaki.hyogo.jp", + "goshiki.hyogo.jp", + "harima.hyogo.jp", + "himeji.hyogo.jp", + "ichikawa.hyogo.jp", + "inagawa.hyogo.jp", + "itami.hyogo.jp", + "kakogawa.hyogo.jp", + "kamigori.hyogo.jp", + "kamikawa.hyogo.jp", + "kasai.hyogo.jp", + "kasuga.hyogo.jp", + "kawanishi.hyogo.jp", + "miki.hyogo.jp", + "minamiawaji.hyogo.jp", + "nishinomiya.hyogo.jp", + "nishiwaki.hyogo.jp", + "ono.hyogo.jp", + "sanda.hyogo.jp", + "sannan.hyogo.jp", + "sasayama.hyogo.jp", + "sayo.hyogo.jp", + "shingu.hyogo.jp", + "shinonsen.hyogo.jp", + "shiso.hyogo.jp", + "sumoto.hyogo.jp", + "taishi.hyogo.jp", + "taka.hyogo.jp", + "takarazuka.hyogo.jp", + "takasago.hyogo.jp", + "takino.hyogo.jp", + "tamba.hyogo.jp", + "tatsuno.hyogo.jp", + "toyooka.hyogo.jp", + "yabu.hyogo.jp", + "yashiro.hyogo.jp", + "yoka.hyogo.jp", + "yokawa.hyogo.jp", + "ami.ibaraki.jp", + "asahi.ibaraki.jp", + "bando.ibaraki.jp", + "chikusei.ibaraki.jp", + "daigo.ibaraki.jp", + "fujishiro.ibaraki.jp", + "hitachi.ibaraki.jp", + "hitachinaka.ibaraki.jp", + "hitachiomiya.ibaraki.jp", + "hitachiota.ibaraki.jp", + "ibaraki.ibaraki.jp", + "ina.ibaraki.jp", + "inashiki.ibaraki.jp", + "itako.ibaraki.jp", + "iwama.ibaraki.jp", + "joso.ibaraki.jp", + "kamisu.ibaraki.jp", + "kasama.ibaraki.jp", + "kashima.ibaraki.jp", + "kasumigaura.ibaraki.jp", + "koga.ibaraki.jp", + "miho.ibaraki.jp", + "mito.ibaraki.jp", + "moriya.ibaraki.jp", + "naka.ibaraki.jp", + "namegata.ibaraki.jp", + "oarai.ibaraki.jp", + "ogawa.ibaraki.jp", + "omitama.ibaraki.jp", + "ryugasaki.ibaraki.jp", + "sakai.ibaraki.jp", + "sakuragawa.ibaraki.jp", + "shimodate.ibaraki.jp", + "shimotsuma.ibaraki.jp", + "shirosato.ibaraki.jp", + "sowa.ibaraki.jp", + "suifu.ibaraki.jp", + "takahagi.ibaraki.jp", + "tamatsukuri.ibaraki.jp", + "tokai.ibaraki.jp", + "tomobe.ibaraki.jp", + "tone.ibaraki.jp", + "toride.ibaraki.jp", + "tsuchiura.ibaraki.jp", + "tsukuba.ibaraki.jp", + "uchihara.ibaraki.jp", + "ushiku.ibaraki.jp", + "yachiyo.ibaraki.jp", + "yamagata.ibaraki.jp", + "yawara.ibaraki.jp", + "yuki.ibaraki.jp", + "anamizu.ishikawa.jp", + "hakui.ishikawa.jp", + "hakusan.ishikawa.jp", + "kaga.ishikawa.jp", + "kahoku.ishikawa.jp", + "kanazawa.ishikawa.jp", + "kawakita.ishikawa.jp", + "komatsu.ishikawa.jp", + "nakanoto.ishikawa.jp", + "nanao.ishikawa.jp", + "nomi.ishikawa.jp", + "nonoichi.ishikawa.jp", + "noto.ishikawa.jp", + "shika.ishikawa.jp", + "suzu.ishikawa.jp", + 
"tsubata.ishikawa.jp", + "tsurugi.ishikawa.jp", + "uchinada.ishikawa.jp", + "wajima.ishikawa.jp", + "fudai.iwate.jp", + "fujisawa.iwate.jp", + "hanamaki.iwate.jp", + "hiraizumi.iwate.jp", + "hirono.iwate.jp", + "ichinohe.iwate.jp", + "ichinoseki.iwate.jp", + "iwaizumi.iwate.jp", + "iwate.iwate.jp", + "joboji.iwate.jp", + "kamaishi.iwate.jp", + "kanegasaki.iwate.jp", + "karumai.iwate.jp", + "kawai.iwate.jp", + "kitakami.iwate.jp", + "kuji.iwate.jp", + "kunohe.iwate.jp", + "kuzumaki.iwate.jp", + "miyako.iwate.jp", + "mizusawa.iwate.jp", + "morioka.iwate.jp", + "ninohe.iwate.jp", + "noda.iwate.jp", + "ofunato.iwate.jp", + "oshu.iwate.jp", + "otsuchi.iwate.jp", + "rikuzentakata.iwate.jp", + "shiwa.iwate.jp", + "shizukuishi.iwate.jp", + "sumita.iwate.jp", + "tanohata.iwate.jp", + "tono.iwate.jp", + "yahaba.iwate.jp", + "yamada.iwate.jp", + "ayagawa.kagawa.jp", + "higashikagawa.kagawa.jp", + "kanonji.kagawa.jp", + "kotohira.kagawa.jp", + "manno.kagawa.jp", + "marugame.kagawa.jp", + "mitoyo.kagawa.jp", + "naoshima.kagawa.jp", + "sanuki.kagawa.jp", + "tadotsu.kagawa.jp", + "takamatsu.kagawa.jp", + "tonosho.kagawa.jp", + "uchinomi.kagawa.jp", + "utazu.kagawa.jp", + "zentsuji.kagawa.jp", + "akune.kagoshima.jp", + "amami.kagoshima.jp", + "hioki.kagoshima.jp", + "isa.kagoshima.jp", + "isen.kagoshima.jp", + "izumi.kagoshima.jp", + "kagoshima.kagoshima.jp", + "kanoya.kagoshima.jp", + "kawanabe.kagoshima.jp", + "kinko.kagoshima.jp", + "kouyama.kagoshima.jp", + "makurazaki.kagoshima.jp", + "matsumoto.kagoshima.jp", + "minamitane.kagoshima.jp", + "nakatane.kagoshima.jp", + "nishinoomote.kagoshima.jp", + "satsumasendai.kagoshima.jp", + "soo.kagoshima.jp", + "tarumizu.kagoshima.jp", + "yusui.kagoshima.jp", + "aikawa.kanagawa.jp", + "atsugi.kanagawa.jp", + "ayase.kanagawa.jp", + "chigasaki.kanagawa.jp", + "ebina.kanagawa.jp", + "fujisawa.kanagawa.jp", + "hadano.kanagawa.jp", + "hakone.kanagawa.jp", + "hiratsuka.kanagawa.jp", + "isehara.kanagawa.jp", + "kaisei.kanagawa.jp", + "kamakura.kanagawa.jp", + "kiyokawa.kanagawa.jp", + "matsuda.kanagawa.jp", + "minamiashigara.kanagawa.jp", + "miura.kanagawa.jp", + "nakai.kanagawa.jp", + "ninomiya.kanagawa.jp", + "odawara.kanagawa.jp", + "oi.kanagawa.jp", + "oiso.kanagawa.jp", + "sagamihara.kanagawa.jp", + "samukawa.kanagawa.jp", + "tsukui.kanagawa.jp", + "yamakita.kanagawa.jp", + "yamato.kanagawa.jp", + "yokosuka.kanagawa.jp", + "yugawara.kanagawa.jp", + "zama.kanagawa.jp", + "zushi.kanagawa.jp", + "aki.kochi.jp", + "geisei.kochi.jp", + "hidaka.kochi.jp", + "higashitsuno.kochi.jp", + "ino.kochi.jp", + "kagami.kochi.jp", + "kami.kochi.jp", + "kitagawa.kochi.jp", + "kochi.kochi.jp", + "mihara.kochi.jp", + "motoyama.kochi.jp", + "muroto.kochi.jp", + "nahari.kochi.jp", + "nakamura.kochi.jp", + "nankoku.kochi.jp", + "nishitosa.kochi.jp", + "niyodogawa.kochi.jp", + "ochi.kochi.jp", + "okawa.kochi.jp", + "otoyo.kochi.jp", + "otsuki.kochi.jp", + "sakawa.kochi.jp", + "sukumo.kochi.jp", + "susaki.kochi.jp", + "tosa.kochi.jp", + "tosashimizu.kochi.jp", + "toyo.kochi.jp", + "tsuno.kochi.jp", + "umaji.kochi.jp", + "yasuda.kochi.jp", + "yusuhara.kochi.jp", + "amakusa.kumamoto.jp", + "arao.kumamoto.jp", + "aso.kumamoto.jp", + "choyo.kumamoto.jp", + "gyokuto.kumamoto.jp", + "hitoyoshi.kumamoto.jp", + "kamiamakusa.kumamoto.jp", + "kashima.kumamoto.jp", + "kikuchi.kumamoto.jp", + "kumamoto.kumamoto.jp", + "mashiki.kumamoto.jp", + "mifune.kumamoto.jp", + "minamata.kumamoto.jp", + "minamioguni.kumamoto.jp", + "nagasu.kumamoto.jp", + "nishihara.kumamoto.jp", + "oguni.kumamoto.jp", + 
"ozu.kumamoto.jp", + "sumoto.kumamoto.jp", + "takamori.kumamoto.jp", + "uki.kumamoto.jp", + "uto.kumamoto.jp", + "yamaga.kumamoto.jp", + "yamato.kumamoto.jp", + "yatsushiro.kumamoto.jp", + "ayabe.kyoto.jp", + "fukuchiyama.kyoto.jp", + "higashiyama.kyoto.jp", + "ide.kyoto.jp", + "ine.kyoto.jp", + "joyo.kyoto.jp", + "kameoka.kyoto.jp", + "kamo.kyoto.jp", + "kita.kyoto.jp", + "kizu.kyoto.jp", + "kumiyama.kyoto.jp", + "kyotamba.kyoto.jp", + "kyotanabe.kyoto.jp", + "kyotango.kyoto.jp", + "maizuru.kyoto.jp", + "minami.kyoto.jp", + "minamiyamashiro.kyoto.jp", + "miyazu.kyoto.jp", + "muko.kyoto.jp", + "nagaokakyo.kyoto.jp", + "nakagyo.kyoto.jp", + "nantan.kyoto.jp", + "oyamazaki.kyoto.jp", + "sakyo.kyoto.jp", + "seika.kyoto.jp", + "tanabe.kyoto.jp", + "uji.kyoto.jp", + "ujitawara.kyoto.jp", + "wazuka.kyoto.jp", + "yamashina.kyoto.jp", + "yawata.kyoto.jp", + "asahi.mie.jp", + "inabe.mie.jp", + "ise.mie.jp", + "kameyama.mie.jp", + "kawagoe.mie.jp", + "kiho.mie.jp", + "kisosaki.mie.jp", + "kiwa.mie.jp", + "komono.mie.jp", + "kumano.mie.jp", + "kuwana.mie.jp", + "matsusaka.mie.jp", + "meiwa.mie.jp", + "mihama.mie.jp", + "minamiise.mie.jp", + "misugi.mie.jp", + "miyama.mie.jp", + "nabari.mie.jp", + "shima.mie.jp", + "suzuka.mie.jp", + "tado.mie.jp", + "taiki.mie.jp", + "taki.mie.jp", + "tamaki.mie.jp", + "toba.mie.jp", + "tsu.mie.jp", + "udono.mie.jp", + "ureshino.mie.jp", + "watarai.mie.jp", + "yokkaichi.mie.jp", + "furukawa.miyagi.jp", + "higashimatsushima.miyagi.jp", + "ishinomaki.miyagi.jp", + "iwanuma.miyagi.jp", + "kakuda.miyagi.jp", + "kami.miyagi.jp", + "kawasaki.miyagi.jp", + "marumori.miyagi.jp", + "matsushima.miyagi.jp", + "minamisanriku.miyagi.jp", + "misato.miyagi.jp", + "murata.miyagi.jp", + "natori.miyagi.jp", + "ogawara.miyagi.jp", + "ohira.miyagi.jp", + "onagawa.miyagi.jp", + "osaki.miyagi.jp", + "rifu.miyagi.jp", + "semine.miyagi.jp", + "shibata.miyagi.jp", + "shichikashuku.miyagi.jp", + "shikama.miyagi.jp", + "shiogama.miyagi.jp", + "shiroishi.miyagi.jp", + "tagajo.miyagi.jp", + "taiwa.miyagi.jp", + "tome.miyagi.jp", + "tomiya.miyagi.jp", + "wakuya.miyagi.jp", + "watari.miyagi.jp", + "yamamoto.miyagi.jp", + "zao.miyagi.jp", + "aya.miyazaki.jp", + "ebino.miyazaki.jp", + "gokase.miyazaki.jp", + "hyuga.miyazaki.jp", + "kadogawa.miyazaki.jp", + "kawaminami.miyazaki.jp", + "kijo.miyazaki.jp", + "kitagawa.miyazaki.jp", + "kitakata.miyazaki.jp", + "kitaura.miyazaki.jp", + "kobayashi.miyazaki.jp", + "kunitomi.miyazaki.jp", + "kushima.miyazaki.jp", + "mimata.miyazaki.jp", + "miyakonojo.miyazaki.jp", + "miyazaki.miyazaki.jp", + "morotsuka.miyazaki.jp", + "nichinan.miyazaki.jp", + "nishimera.miyazaki.jp", + "nobeoka.miyazaki.jp", + "saito.miyazaki.jp", + "shiiba.miyazaki.jp", + "shintomi.miyazaki.jp", + "takaharu.miyazaki.jp", + "takanabe.miyazaki.jp", + "takazaki.miyazaki.jp", + "tsuno.miyazaki.jp", + "achi.nagano.jp", + "agematsu.nagano.jp", + "anan.nagano.jp", + "aoki.nagano.jp", + "asahi.nagano.jp", + "azumino.nagano.jp", + "chikuhoku.nagano.jp", + "chikuma.nagano.jp", + "chino.nagano.jp", + "fujimi.nagano.jp", + "hakuba.nagano.jp", + "hara.nagano.jp", + "hiraya.nagano.jp", + "iida.nagano.jp", + "iijima.nagano.jp", + "iiyama.nagano.jp", + "iizuna.nagano.jp", + "ikeda.nagano.jp", + "ikusaka.nagano.jp", + "ina.nagano.jp", + "karuizawa.nagano.jp", + "kawakami.nagano.jp", + "kiso.nagano.jp", + "kisofukushima.nagano.jp", + "kitaaiki.nagano.jp", + "komagane.nagano.jp", + "komoro.nagano.jp", + "matsukawa.nagano.jp", + "matsumoto.nagano.jp", + "miasa.nagano.jp", + "minamiaiki.nagano.jp", + 
"minamimaki.nagano.jp", + "minamiminowa.nagano.jp", + "minowa.nagano.jp", + "miyada.nagano.jp", + "miyota.nagano.jp", + "mochizuki.nagano.jp", + "nagano.nagano.jp", + "nagawa.nagano.jp", + "nagiso.nagano.jp", + "nakagawa.nagano.jp", + "nakano.nagano.jp", + "nozawaonsen.nagano.jp", + "obuse.nagano.jp", + "ogawa.nagano.jp", + "okaya.nagano.jp", + "omachi.nagano.jp", + "omi.nagano.jp", + "ookuwa.nagano.jp", + "ooshika.nagano.jp", + "otaki.nagano.jp", + "otari.nagano.jp", + "sakae.nagano.jp", + "sakaki.nagano.jp", + "saku.nagano.jp", + "sakuho.nagano.jp", + "shimosuwa.nagano.jp", + "shinanomachi.nagano.jp", + "shiojiri.nagano.jp", + "suwa.nagano.jp", + "suzaka.nagano.jp", + "takagi.nagano.jp", + "takamori.nagano.jp", + "takayama.nagano.jp", + "tateshina.nagano.jp", + "tatsuno.nagano.jp", + "togakushi.nagano.jp", + "togura.nagano.jp", + "tomi.nagano.jp", + "ueda.nagano.jp", + "wada.nagano.jp", + "yamagata.nagano.jp", + "yamanouchi.nagano.jp", + "yasaka.nagano.jp", + "yasuoka.nagano.jp", + "chijiwa.nagasaki.jp", + "futsu.nagasaki.jp", + "goto.nagasaki.jp", + "hasami.nagasaki.jp", + "hirado.nagasaki.jp", + "iki.nagasaki.jp", + "isahaya.nagasaki.jp", + "kawatana.nagasaki.jp", + "kuchinotsu.nagasaki.jp", + "matsuura.nagasaki.jp", + "nagasaki.nagasaki.jp", + "obama.nagasaki.jp", + "omura.nagasaki.jp", + "oseto.nagasaki.jp", + "saikai.nagasaki.jp", + "sasebo.nagasaki.jp", + "seihi.nagasaki.jp", + "shimabara.nagasaki.jp", + "shinkamigoto.nagasaki.jp", + "togitsu.nagasaki.jp", + "tsushima.nagasaki.jp", + "unzen.nagasaki.jp", + "ando.nara.jp", + "gose.nara.jp", + "heguri.nara.jp", + "higashiyoshino.nara.jp", + "ikaruga.nara.jp", + "ikoma.nara.jp", + "kamikitayama.nara.jp", + "kanmaki.nara.jp", + "kashiba.nara.jp", + "kashihara.nara.jp", + "katsuragi.nara.jp", + "kawai.nara.jp", + "kawakami.nara.jp", + "kawanishi.nara.jp", + "koryo.nara.jp", + "kurotaki.nara.jp", + "mitsue.nara.jp", + "miyake.nara.jp", + "nara.nara.jp", + "nosegawa.nara.jp", + "oji.nara.jp", + "ouda.nara.jp", + "oyodo.nara.jp", + "sakurai.nara.jp", + "sango.nara.jp", + "shimoichi.nara.jp", + "shimokitayama.nara.jp", + "shinjo.nara.jp", + "soni.nara.jp", + "takatori.nara.jp", + "tawaramoto.nara.jp", + "tenkawa.nara.jp", + "tenri.nara.jp", + "uda.nara.jp", + "yamatokoriyama.nara.jp", + "yamatotakada.nara.jp", + "yamazoe.nara.jp", + "yoshino.nara.jp", + "aga.niigata.jp", + "agano.niigata.jp", + "gosen.niigata.jp", + "itoigawa.niigata.jp", + "izumozaki.niigata.jp", + "joetsu.niigata.jp", + "kamo.niigata.jp", + "kariwa.niigata.jp", + "kashiwazaki.niigata.jp", + "minamiuonuma.niigata.jp", + "mitsuke.niigata.jp", + "muika.niigata.jp", + "murakami.niigata.jp", + "myoko.niigata.jp", + "nagaoka.niigata.jp", + "niigata.niigata.jp", + "ojiya.niigata.jp", + "omi.niigata.jp", + "sado.niigata.jp", + "sanjo.niigata.jp", + "seiro.niigata.jp", + "seirou.niigata.jp", + "sekikawa.niigata.jp", + "shibata.niigata.jp", + "tagami.niigata.jp", + "tainai.niigata.jp", + "tochio.niigata.jp", + "tokamachi.niigata.jp", + "tsubame.niigata.jp", + "tsunan.niigata.jp", + "uonuma.niigata.jp", + "yahiko.niigata.jp", + "yoita.niigata.jp", + "yuzawa.niigata.jp", + "beppu.oita.jp", + "bungoono.oita.jp", + "bungotakada.oita.jp", + "hasama.oita.jp", + "hiji.oita.jp", + "himeshima.oita.jp", + "hita.oita.jp", + "kamitsue.oita.jp", + "kokonoe.oita.jp", + "kuju.oita.jp", + "kunisaki.oita.jp", + "kusu.oita.jp", + "oita.oita.jp", + "saiki.oita.jp", + "taketa.oita.jp", + "tsukumi.oita.jp", + "usa.oita.jp", + "usuki.oita.jp", + "yufu.oita.jp", + "akaiwa.okayama.jp", + 
"asakuchi.okayama.jp", + "bizen.okayama.jp", + "hayashima.okayama.jp", + "ibara.okayama.jp", + "kagamino.okayama.jp", + "kasaoka.okayama.jp", + "kibichuo.okayama.jp", + "kumenan.okayama.jp", + "kurashiki.okayama.jp", + "maniwa.okayama.jp", + "misaki.okayama.jp", + "nagi.okayama.jp", + "niimi.okayama.jp", + "nishiawakura.okayama.jp", + "okayama.okayama.jp", + "satosho.okayama.jp", + "setouchi.okayama.jp", + "shinjo.okayama.jp", + "shoo.okayama.jp", + "soja.okayama.jp", + "takahashi.okayama.jp", + "tamano.okayama.jp", + "tsuyama.okayama.jp", + "wake.okayama.jp", + "yakage.okayama.jp", + "aguni.okinawa.jp", + "ginowan.okinawa.jp", + "ginoza.okinawa.jp", + "gushikami.okinawa.jp", + "haebaru.okinawa.jp", + "higashi.okinawa.jp", + "hirara.okinawa.jp", + "iheya.okinawa.jp", + "ishigaki.okinawa.jp", + "ishikawa.okinawa.jp", + "itoman.okinawa.jp", + "izena.okinawa.jp", + "kadena.okinawa.jp", + "kin.okinawa.jp", + "kitadaito.okinawa.jp", + "kitanakagusuku.okinawa.jp", + "kumejima.okinawa.jp", + "kunigami.okinawa.jp", + "minamidaito.okinawa.jp", + "motobu.okinawa.jp", + "nago.okinawa.jp", + "naha.okinawa.jp", + "nakagusuku.okinawa.jp", + "nakijin.okinawa.jp", + "nanjo.okinawa.jp", + "nishihara.okinawa.jp", + "ogimi.okinawa.jp", + "okinawa.okinawa.jp", + "onna.okinawa.jp", + "shimoji.okinawa.jp", + "taketomi.okinawa.jp", + "tarama.okinawa.jp", + "tokashiki.okinawa.jp", + "tomigusuku.okinawa.jp", + "tonaki.okinawa.jp", + "urasoe.okinawa.jp", + "uruma.okinawa.jp", + "yaese.okinawa.jp", + "yomitan.okinawa.jp", + "yonabaru.okinawa.jp", + "yonaguni.okinawa.jp", + "zamami.okinawa.jp", + "abeno.osaka.jp", + "chihayaakasaka.osaka.jp", + "chuo.osaka.jp", + "daito.osaka.jp", + "fujiidera.osaka.jp", + "habikino.osaka.jp", + "hannan.osaka.jp", + "higashiosaka.osaka.jp", + "higashisumiyoshi.osaka.jp", + "higashiyodogawa.osaka.jp", + "hirakata.osaka.jp", + "ibaraki.osaka.jp", + "ikeda.osaka.jp", + "izumi.osaka.jp", + "izumiotsu.osaka.jp", + "izumisano.osaka.jp", + "kadoma.osaka.jp", + "kaizuka.osaka.jp", + "kanan.osaka.jp", + "kashiwara.osaka.jp", + "katano.osaka.jp", + "kawachinagano.osaka.jp", + "kishiwada.osaka.jp", + "kita.osaka.jp", + "kumatori.osaka.jp", + "matsubara.osaka.jp", + "minato.osaka.jp", + "minoh.osaka.jp", + "misaki.osaka.jp", + "moriguchi.osaka.jp", + "neyagawa.osaka.jp", + "nishi.osaka.jp", + "nose.osaka.jp", + "osakasayama.osaka.jp", + "sakai.osaka.jp", + "sayama.osaka.jp", + "sennan.osaka.jp", + "settsu.osaka.jp", + "shijonawate.osaka.jp", + "shimamoto.osaka.jp", + "suita.osaka.jp", + "tadaoka.osaka.jp", + "taishi.osaka.jp", + "tajiri.osaka.jp", + "takaishi.osaka.jp", + "takatsuki.osaka.jp", + "tondabayashi.osaka.jp", + "toyonaka.osaka.jp", + "toyono.osaka.jp", + "yao.osaka.jp", + "ariake.saga.jp", + "arita.saga.jp", + "fukudomi.saga.jp", + "genkai.saga.jp", + "hamatama.saga.jp", + "hizen.saga.jp", + "imari.saga.jp", + "kamimine.saga.jp", + "kanzaki.saga.jp", + "karatsu.saga.jp", + "kashima.saga.jp", + "kitagata.saga.jp", + "kitahata.saga.jp", + "kiyama.saga.jp", + "kouhoku.saga.jp", + "kyuragi.saga.jp", + "nishiarita.saga.jp", + "ogi.saga.jp", + "omachi.saga.jp", + "ouchi.saga.jp", + "saga.saga.jp", + "shiroishi.saga.jp", + "taku.saga.jp", + "tara.saga.jp", + "tosu.saga.jp", + "yoshinogari.saga.jp", + "arakawa.saitama.jp", + "asaka.saitama.jp", + "chichibu.saitama.jp", + "fujimi.saitama.jp", + "fujimino.saitama.jp", + "fukaya.saitama.jp", + "hanno.saitama.jp", + "hanyu.saitama.jp", + "hasuda.saitama.jp", + "hatogaya.saitama.jp", + "hatoyama.saitama.jp", + "hidaka.saitama.jp", + 
"higashichichibu.saitama.jp", + "higashimatsuyama.saitama.jp", + "honjo.saitama.jp", + "ina.saitama.jp", + "iruma.saitama.jp", + "iwatsuki.saitama.jp", + "kamiizumi.saitama.jp", + "kamikawa.saitama.jp", + "kamisato.saitama.jp", + "kasukabe.saitama.jp", + "kawagoe.saitama.jp", + "kawaguchi.saitama.jp", + "kawajima.saitama.jp", + "kazo.saitama.jp", + "kitamoto.saitama.jp", + "koshigaya.saitama.jp", + "kounosu.saitama.jp", + "kuki.saitama.jp", + "kumagaya.saitama.jp", + "matsubushi.saitama.jp", + "minano.saitama.jp", + "misato.saitama.jp", + "miyashiro.saitama.jp", + "miyoshi.saitama.jp", + "moroyama.saitama.jp", + "nagatoro.saitama.jp", + "namegawa.saitama.jp", + "niiza.saitama.jp", + "ogano.saitama.jp", + "ogawa.saitama.jp", + "ogose.saitama.jp", + "okegawa.saitama.jp", + "omiya.saitama.jp", + "otaki.saitama.jp", + "ranzan.saitama.jp", + "ryokami.saitama.jp", + "saitama.saitama.jp", + "sakado.saitama.jp", + "satte.saitama.jp", + "sayama.saitama.jp", + "shiki.saitama.jp", + "shiraoka.saitama.jp", + "soka.saitama.jp", + "sugito.saitama.jp", + "toda.saitama.jp", + "tokigawa.saitama.jp", + "tokorozawa.saitama.jp", + "tsurugashima.saitama.jp", + "urawa.saitama.jp", + "warabi.saitama.jp", + "yashio.saitama.jp", + "yokoze.saitama.jp", + "yono.saitama.jp", + "yorii.saitama.jp", + "yoshida.saitama.jp", + "yoshikawa.saitama.jp", + "yoshimi.saitama.jp", + "aisho.shiga.jp", + "gamo.shiga.jp", + "higashiomi.shiga.jp", + "hikone.shiga.jp", + "koka.shiga.jp", + "konan.shiga.jp", + "kosei.shiga.jp", + "koto.shiga.jp", + "kusatsu.shiga.jp", + "maibara.shiga.jp", + "moriyama.shiga.jp", + "nagahama.shiga.jp", + "nishiazai.shiga.jp", + "notogawa.shiga.jp", + "omihachiman.shiga.jp", + "otsu.shiga.jp", + "ritto.shiga.jp", + "ryuoh.shiga.jp", + "takashima.shiga.jp", + "takatsuki.shiga.jp", + "torahime.shiga.jp", + "toyosato.shiga.jp", + "yasu.shiga.jp", + "akagi.shimane.jp", + "ama.shimane.jp", + "gotsu.shimane.jp", + "hamada.shimane.jp", + "higashiizumo.shimane.jp", + "hikawa.shimane.jp", + "hikimi.shimane.jp", + "izumo.shimane.jp", + "kakinoki.shimane.jp", + "masuda.shimane.jp", + "matsue.shimane.jp", + "misato.shimane.jp", + "nishinoshima.shimane.jp", + "ohda.shimane.jp", + "okinoshima.shimane.jp", + "okuizumo.shimane.jp", + "shimane.shimane.jp", + "tamayu.shimane.jp", + "tsuwano.shimane.jp", + "unnan.shimane.jp", + "yakumo.shimane.jp", + "yasugi.shimane.jp", + "yatsuka.shimane.jp", + "arai.shizuoka.jp", + "atami.shizuoka.jp", + "fuji.shizuoka.jp", + "fujieda.shizuoka.jp", + "fujikawa.shizuoka.jp", + "fujinomiya.shizuoka.jp", + "fukuroi.shizuoka.jp", + "gotemba.shizuoka.jp", + "haibara.shizuoka.jp", + "hamamatsu.shizuoka.jp", + "higashiizu.shizuoka.jp", + "ito.shizuoka.jp", + "iwata.shizuoka.jp", + "izu.shizuoka.jp", + "izunokuni.shizuoka.jp", + "kakegawa.shizuoka.jp", + "kannami.shizuoka.jp", + "kawanehon.shizuoka.jp", + "kawazu.shizuoka.jp", + "kikugawa.shizuoka.jp", + "kosai.shizuoka.jp", + "makinohara.shizuoka.jp", + "matsuzaki.shizuoka.jp", + "minamiizu.shizuoka.jp", + "mishima.shizuoka.jp", + "morimachi.shizuoka.jp", + "nishiizu.shizuoka.jp", + "numazu.shizuoka.jp", + "omaezaki.shizuoka.jp", + "shimada.shizuoka.jp", + "shimizu.shizuoka.jp", + "shimoda.shizuoka.jp", + "shizuoka.shizuoka.jp", + "susono.shizuoka.jp", + "yaizu.shizuoka.jp", + "yoshida.shizuoka.jp", + "ashikaga.tochigi.jp", + "bato.tochigi.jp", + "haga.tochigi.jp", + "ichikai.tochigi.jp", + "iwafune.tochigi.jp", + "kaminokawa.tochigi.jp", + "kanuma.tochigi.jp", + "karasuyama.tochigi.jp", + "kuroiso.tochigi.jp", + "mashiko.tochigi.jp", + 
"mibu.tochigi.jp", + "moka.tochigi.jp", + "motegi.tochigi.jp", + "nasu.tochigi.jp", + "nasushiobara.tochigi.jp", + "nikko.tochigi.jp", + "nishikata.tochigi.jp", + "nogi.tochigi.jp", + "ohira.tochigi.jp", + "ohtawara.tochigi.jp", + "oyama.tochigi.jp", + "sakura.tochigi.jp", + "sano.tochigi.jp", + "shimotsuke.tochigi.jp", + "shioya.tochigi.jp", + "takanezawa.tochigi.jp", + "tochigi.tochigi.jp", + "tsuga.tochigi.jp", + "ujiie.tochigi.jp", + "utsunomiya.tochigi.jp", + "yaita.tochigi.jp", + "aizumi.tokushima.jp", + "anan.tokushima.jp", + "ichiba.tokushima.jp", + "itano.tokushima.jp", + "kainan.tokushima.jp", + "komatsushima.tokushima.jp", + "matsushige.tokushima.jp", + "mima.tokushima.jp", + "minami.tokushima.jp", + "miyoshi.tokushima.jp", + "mugi.tokushima.jp", + "nakagawa.tokushima.jp", + "naruto.tokushima.jp", + "sanagochi.tokushima.jp", + "shishikui.tokushima.jp", + "tokushima.tokushima.jp", + "wajiki.tokushima.jp", + "adachi.tokyo.jp", + "akiruno.tokyo.jp", + "akishima.tokyo.jp", + "aogashima.tokyo.jp", + "arakawa.tokyo.jp", + "bunkyo.tokyo.jp", + "chiyoda.tokyo.jp", + "chofu.tokyo.jp", + "chuo.tokyo.jp", + "edogawa.tokyo.jp", + "fuchu.tokyo.jp", + "fussa.tokyo.jp", + "hachijo.tokyo.jp", + "hachioji.tokyo.jp", + "hamura.tokyo.jp", + "higashikurume.tokyo.jp", + "higashimurayama.tokyo.jp", + "higashiyamato.tokyo.jp", + "hino.tokyo.jp", + "hinode.tokyo.jp", + "hinohara.tokyo.jp", + "inagi.tokyo.jp", + "itabashi.tokyo.jp", + "katsushika.tokyo.jp", + "kita.tokyo.jp", + "kiyose.tokyo.jp", + "kodaira.tokyo.jp", + "koganei.tokyo.jp", + "kokubunji.tokyo.jp", + "komae.tokyo.jp", + "koto.tokyo.jp", + "kouzushima.tokyo.jp", + "kunitachi.tokyo.jp", + "machida.tokyo.jp", + "meguro.tokyo.jp", + "minato.tokyo.jp", + "mitaka.tokyo.jp", + "mizuho.tokyo.jp", + "musashimurayama.tokyo.jp", + "musashino.tokyo.jp", + "nakano.tokyo.jp", + "nerima.tokyo.jp", + "ogasawara.tokyo.jp", + "okutama.tokyo.jp", + "ome.tokyo.jp", + "oshima.tokyo.jp", + "ota.tokyo.jp", + "setagaya.tokyo.jp", + "shibuya.tokyo.jp", + "shinagawa.tokyo.jp", + "shinjuku.tokyo.jp", + "suginami.tokyo.jp", + "sumida.tokyo.jp", + "tachikawa.tokyo.jp", + "taito.tokyo.jp", + "tama.tokyo.jp", + "toshima.tokyo.jp", + "chizu.tottori.jp", + "hino.tottori.jp", + "kawahara.tottori.jp", + "koge.tottori.jp", + "kotoura.tottori.jp", + "misasa.tottori.jp", + "nanbu.tottori.jp", + "nichinan.tottori.jp", + "sakaiminato.tottori.jp", + "tottori.tottori.jp", + "wakasa.tottori.jp", + "yazu.tottori.jp", + "yonago.tottori.jp", + "asahi.toyama.jp", + "fuchu.toyama.jp", + "fukumitsu.toyama.jp", + "funahashi.toyama.jp", + "himi.toyama.jp", + "imizu.toyama.jp", + "inami.toyama.jp", + "johana.toyama.jp", + "kamiichi.toyama.jp", + "kurobe.toyama.jp", + "nakaniikawa.toyama.jp", + "namerikawa.toyama.jp", + "nanto.toyama.jp", + "nyuzen.toyama.jp", + "oyabe.toyama.jp", + "taira.toyama.jp", + "takaoka.toyama.jp", + "tateyama.toyama.jp", + "toga.toyama.jp", + "tonami.toyama.jp", + "toyama.toyama.jp", + "unazuki.toyama.jp", + "uozu.toyama.jp", + "yamada.toyama.jp", + "arida.wakayama.jp", + "aridagawa.wakayama.jp", + "gobo.wakayama.jp", + "hashimoto.wakayama.jp", + "hidaka.wakayama.jp", + "hirogawa.wakayama.jp", + "inami.wakayama.jp", + "iwade.wakayama.jp", + "kainan.wakayama.jp", + "kamitonda.wakayama.jp", + "katsuragi.wakayama.jp", + "kimino.wakayama.jp", + "kinokawa.wakayama.jp", + "kitayama.wakayama.jp", + "koya.wakayama.jp", + "koza.wakayama.jp", + "kozagawa.wakayama.jp", + "kudoyama.wakayama.jp", + "kushimoto.wakayama.jp", + "mihama.wakayama.jp", + "misato.wakayama.jp", + 
"nachikatsuura.wakayama.jp", + "shingu.wakayama.jp", + "shirahama.wakayama.jp", + "taiji.wakayama.jp", + "tanabe.wakayama.jp", + "wakayama.wakayama.jp", + "yuasa.wakayama.jp", + "yura.wakayama.jp", + "asahi.yamagata.jp", + "funagata.yamagata.jp", + "higashine.yamagata.jp", + "iide.yamagata.jp", + "kahoku.yamagata.jp", + "kaminoyama.yamagata.jp", + "kaneyama.yamagata.jp", + "kawanishi.yamagata.jp", + "mamurogawa.yamagata.jp", + "mikawa.yamagata.jp", + "murayama.yamagata.jp", + "nagai.yamagata.jp", + "nakayama.yamagata.jp", + "nanyo.yamagata.jp", + "nishikawa.yamagata.jp", + "obanazawa.yamagata.jp", + "oe.yamagata.jp", + "oguni.yamagata.jp", + "ohkura.yamagata.jp", + "oishida.yamagata.jp", + "sagae.yamagata.jp", + "sakata.yamagata.jp", + "sakegawa.yamagata.jp", + "shinjo.yamagata.jp", + "shirataka.yamagata.jp", + "shonai.yamagata.jp", + "takahata.yamagata.jp", + "tendo.yamagata.jp", + "tozawa.yamagata.jp", + "tsuruoka.yamagata.jp", + "yamagata.yamagata.jp", + "yamanobe.yamagata.jp", + "yonezawa.yamagata.jp", + "yuza.yamagata.jp", + "abu.yamaguchi.jp", + "hagi.yamaguchi.jp", + "hikari.yamaguchi.jp", + "hofu.yamaguchi.jp", + "iwakuni.yamaguchi.jp", + "kudamatsu.yamaguchi.jp", + "mitou.yamaguchi.jp", + "nagato.yamaguchi.jp", + "oshima.yamaguchi.jp", + "shimonoseki.yamaguchi.jp", + "shunan.yamaguchi.jp", + "tabuse.yamaguchi.jp", + "tokuyama.yamaguchi.jp", + "toyota.yamaguchi.jp", + "ube.yamaguchi.jp", + "yuu.yamaguchi.jp", + "chuo.yamanashi.jp", + "doshi.yamanashi.jp", + "fuefuki.yamanashi.jp", + "fujikawa.yamanashi.jp", + "fujikawaguchiko.yamanashi.jp", + "fujiyoshida.yamanashi.jp", + "hayakawa.yamanashi.jp", + "hokuto.yamanashi.jp", + "ichikawamisato.yamanashi.jp", + "kai.yamanashi.jp", + "kofu.yamanashi.jp", + "koshu.yamanashi.jp", + "kosuge.yamanashi.jp", + "minami-alps.yamanashi.jp", + "minobu.yamanashi.jp", + "nakamichi.yamanashi.jp", + "nanbu.yamanashi.jp", + "narusawa.yamanashi.jp", + "nirasaki.yamanashi.jp", + "nishikatsura.yamanashi.jp", + "oshino.yamanashi.jp", + "otsuki.yamanashi.jp", + "showa.yamanashi.jp", + "tabayama.yamanashi.jp", + "tsuru.yamanashi.jp", + "uenohara.yamanashi.jp", + "yamanakako.yamanashi.jp", + "yamanashi.yamanashi.jp", + "*.ke", + "kg", + "org.kg", + "net.kg", + "com.kg", + "edu.kg", + "gov.kg", + "mil.kg", + "*.kh", + "ki", + "edu.ki", + "biz.ki", + "net.ki", + "org.ki", + "gov.ki", + "info.ki", + "com.ki", + "km", + "org.km", + "nom.km", + "gov.km", + "prd.km", + "tm.km", + "edu.km", + "mil.km", + "ass.km", + "com.km", + "coop.km", + "asso.km", + "presse.km", + "medecin.km", + "notaires.km", + "pharmaciens.km", + "veterinaire.km", + "gouv.km", + "kn", + "net.kn", + "org.kn", + "edu.kn", + "gov.kn", + "kp", + "com.kp", + "edu.kp", + "gov.kp", + "org.kp", + "rep.kp", + "tra.kp", + "kr", + "ac.kr", + "co.kr", + "es.kr", + "go.kr", + "hs.kr", + "kg.kr", + "mil.kr", + "ms.kr", + "ne.kr", + "or.kr", + "pe.kr", + "re.kr", + "sc.kr", + "busan.kr", + "chungbuk.kr", + "chungnam.kr", + "daegu.kr", + "daejeon.kr", + "gangwon.kr", + "gwangju.kr", + "gyeongbuk.kr", + "gyeonggi.kr", + "gyeongnam.kr", + "incheon.kr", + "jeju.kr", + "jeonbuk.kr", + "jeonnam.kr", + "seoul.kr", + "ulsan.kr", + "*.kw", + "ky", + "edu.ky", + "gov.ky", + "com.ky", + "org.ky", + "net.ky", + "kz", + "org.kz", + "edu.kz", + "net.kz", + "gov.kz", + "mil.kz", + "com.kz", + "la", + "int.la", + "net.la", + "info.la", + "edu.la", + "gov.la", + "per.la", + "com.la", + "org.la", + "lb", + "com.lb", + "edu.lb", + "gov.lb", + "net.lb", + "org.lb", + "lc", + "com.lc", + "net.lc", + "co.lc", + "org.lc", + 
"edu.lc", + "gov.lc", + "li", + "lk", + "gov.lk", + "sch.lk", + "net.lk", + "int.lk", + "com.lk", + "org.lk", + "edu.lk", + "ngo.lk", + "soc.lk", + "web.lk", + "ltd.lk", + "assn.lk", + "grp.lk", + "hotel.lk", + "ac.lk", + "lr", + "com.lr", + "edu.lr", + "gov.lr", + "org.lr", + "net.lr", + "ls", + "co.ls", + "org.ls", + "lt", + "gov.lt", + "lu", + "lv", + "com.lv", + "edu.lv", + "gov.lv", + "org.lv", + "mil.lv", + "id.lv", + "net.lv", + "asn.lv", + "conf.lv", + "ly", + "com.ly", + "net.ly", + "gov.ly", + "plc.ly", + "edu.ly", + "sch.ly", + "med.ly", + "org.ly", + "id.ly", + "ma", + "co.ma", + "net.ma", + "gov.ma", + "org.ma", + "ac.ma", + "press.ma", + "mc", + "tm.mc", + "asso.mc", + "md", + "me", + "co.me", + "net.me", + "org.me", + "edu.me", + "ac.me", + "gov.me", + "its.me", + "priv.me", + "mg", + "org.mg", + "nom.mg", + "gov.mg", + "prd.mg", + "tm.mg", + "edu.mg", + "mil.mg", + "com.mg", + "co.mg", + "mh", + "mil", + "mk", + "com.mk", + "org.mk", + "net.mk", + "edu.mk", + "gov.mk", + "inf.mk", + "name.mk", + "ml", + "com.ml", + "edu.ml", + "gouv.ml", + "gov.ml", + "net.ml", + "org.ml", + "presse.ml", + "*.mm", + "mn", + "gov.mn", + "edu.mn", + "org.mn", + "mo", + "com.mo", + "net.mo", + "org.mo", + "edu.mo", + "gov.mo", + "mobi", + "mp", + "mq", + "mr", + "gov.mr", + "ms", + "com.ms", + "edu.ms", + "gov.ms", + "net.ms", + "org.ms", + "mt", + "com.mt", + "edu.mt", + "net.mt", + "org.mt", + "mu", + "com.mu", + "net.mu", + "org.mu", + "gov.mu", + "ac.mu", + "co.mu", + "or.mu", + "museum", + "academy.museum", + "agriculture.museum", + "air.museum", + "airguard.museum", + "alabama.museum", + "alaska.museum", + "amber.museum", + "ambulance.museum", + "american.museum", + "americana.museum", + "americanantiques.museum", + "americanart.museum", + "amsterdam.museum", + "and.museum", + "annefrank.museum", + "anthro.museum", + "anthropology.museum", + "antiques.museum", + "aquarium.museum", + "arboretum.museum", + "archaeological.museum", + "archaeology.museum", + "architecture.museum", + "art.museum", + "artanddesign.museum", + "artcenter.museum", + "artdeco.museum", + "arteducation.museum", + "artgallery.museum", + "arts.museum", + "artsandcrafts.museum", + "asmatart.museum", + "assassination.museum", + "assisi.museum", + "association.museum", + "astronomy.museum", + "atlanta.museum", + "austin.museum", + "australia.museum", + "automotive.museum", + "aviation.museum", + "axis.museum", + "badajoz.museum", + "baghdad.museum", + "bahn.museum", + "bale.museum", + "baltimore.museum", + "barcelona.museum", + "baseball.museum", + "basel.museum", + "baths.museum", + "bauern.museum", + "beauxarts.museum", + "beeldengeluid.museum", + "bellevue.museum", + "bergbau.museum", + "berkeley.museum", + "berlin.museum", + "bern.museum", + "bible.museum", + "bilbao.museum", + "bill.museum", + "birdart.museum", + "birthplace.museum", + "bonn.museum", + "boston.museum", + "botanical.museum", + "botanicalgarden.museum", + "botanicgarden.museum", + "botany.museum", + "brandywinevalley.museum", + "brasil.museum", + "bristol.museum", + "british.museum", + "britishcolumbia.museum", + "broadcast.museum", + "brunel.museum", + "brussel.museum", + "brussels.museum", + "bruxelles.museum", + "building.museum", + "burghof.museum", + "bus.museum", + "bushey.museum", + "cadaques.museum", + "california.museum", + "cambridge.museum", + "can.museum", + "canada.museum", + "capebreton.museum", + "carrier.museum", + "cartoonart.museum", + "casadelamoneda.museum", + "castle.museum", + "castres.museum", + "celtic.museum", + 
"center.museum", + "chattanooga.museum", + "cheltenham.museum", + "chesapeakebay.museum", + "chicago.museum", + "children.museum", + "childrens.museum", + "childrensgarden.museum", + "chiropractic.museum", + "chocolate.museum", + "christiansburg.museum", + "cincinnati.museum", + "cinema.museum", + "circus.museum", + "civilisation.museum", + "civilization.museum", + "civilwar.museum", + "clinton.museum", + "clock.museum", + "coal.museum", + "coastaldefence.museum", + "cody.museum", + "coldwar.museum", + "collection.museum", + "colonialwilliamsburg.museum", + "coloradoplateau.museum", + "columbia.museum", + "columbus.museum", + "communication.museum", + "communications.museum", + "community.museum", + "computer.museum", + "computerhistory.museum", + "xn--comunicaes-v6a2o.museum", + "contemporary.museum", + "contemporaryart.museum", + "convent.museum", + "copenhagen.museum", + "corporation.museum", + "xn--correios-e-telecomunicaes-ghc29a.museum", + "corvette.museum", + "costume.museum", + "countryestate.museum", + "county.museum", + "crafts.museum", + "cranbrook.museum", + "creation.museum", + "cultural.museum", + "culturalcenter.museum", + "culture.museum", + "cyber.museum", + "cymru.museum", + "dali.museum", + "dallas.museum", + "database.museum", + "ddr.museum", + "decorativearts.museum", + "delaware.museum", + "delmenhorst.museum", + "denmark.museum", + "depot.museum", + "design.museum", + "detroit.museum", + "dinosaur.museum", + "discovery.museum", + "dolls.museum", + "donostia.museum", + "durham.museum", + "eastafrica.museum", + "eastcoast.museum", + "education.museum", + "educational.museum", + "egyptian.museum", + "eisenbahn.museum", + "elburg.museum", + "elvendrell.museum", + "embroidery.museum", + "encyclopedic.museum", + "england.museum", + "entomology.museum", + "environment.museum", + "environmentalconservation.museum", + "epilepsy.museum", + "essex.museum", + "estate.museum", + "ethnology.museum", + "exeter.museum", + "exhibition.museum", + "family.museum", + "farm.museum", + "farmequipment.museum", + "farmers.museum", + "farmstead.museum", + "field.museum", + "figueres.museum", + "filatelia.museum", + "film.museum", + "fineart.museum", + "finearts.museum", + "finland.museum", + "flanders.museum", + "florida.museum", + "force.museum", + "fortmissoula.museum", + "fortworth.museum", + "foundation.museum", + "francaise.museum", + "frankfurt.museum", + "franziskaner.museum", + "freemasonry.museum", + "freiburg.museum", + "fribourg.museum", + "frog.museum", + "fundacio.museum", + "furniture.museum", + "gallery.museum", + "garden.museum", + "gateway.museum", + "geelvinck.museum", + "gemological.museum", + "geology.museum", + "georgia.museum", + "giessen.museum", + "glas.museum", + "glass.museum", + "gorge.museum", + "grandrapids.museum", + "graz.museum", + "guernsey.museum", + "halloffame.museum", + "hamburg.museum", + "handson.museum", + "harvestcelebration.museum", + "hawaii.museum", + "health.museum", + "heimatunduhren.museum", + "hellas.museum", + "helsinki.museum", + "hembygdsforbund.museum", + "heritage.museum", + "histoire.museum", + "historical.museum", + "historicalsociety.museum", + "historichouses.museum", + "historisch.museum", + "historisches.museum", + "history.museum", + "historyofscience.museum", + "horology.museum", + "house.museum", + "humanities.museum", + "illustration.museum", + "imageandsound.museum", + "indian.museum", + "indiana.museum", + "indianapolis.museum", + "indianmarket.museum", + "intelligence.museum", + "interactive.museum", + "iraq.museum", + 
"iron.museum", + "isleofman.museum", + "jamison.museum", + "jefferson.museum", + "jerusalem.museum", + "jewelry.museum", + "jewish.museum", + "jewishart.museum", + "jfk.museum", + "journalism.museum", + "judaica.museum", + "judygarland.museum", + "juedisches.museum", + "juif.museum", + "karate.museum", + "karikatur.museum", + "kids.museum", + "koebenhavn.museum", + "koeln.museum", + "kunst.museum", + "kunstsammlung.museum", + "kunstunddesign.museum", + "labor.museum", + "labour.museum", + "lajolla.museum", + "lancashire.museum", + "landes.museum", + "lans.museum", + "xn--lns-qla.museum", + "larsson.museum", + "lewismiller.museum", + "lincoln.museum", + "linz.museum", + "living.museum", + "livinghistory.museum", + "localhistory.museum", + "london.museum", + "losangeles.museum", + "louvre.museum", + "loyalist.museum", + "lucerne.museum", + "luxembourg.museum", + "luzern.museum", + "mad.museum", + "madrid.museum", + "mallorca.museum", + "manchester.museum", + "mansion.museum", + "mansions.museum", + "manx.museum", + "marburg.museum", + "maritime.museum", + "maritimo.museum", + "maryland.museum", + "marylhurst.museum", + "media.museum", + "medical.museum", + "medizinhistorisches.museum", + "meeres.museum", + "memorial.museum", + "mesaverde.museum", + "michigan.museum", + "midatlantic.museum", + "military.museum", + "mill.museum", + "miners.museum", + "mining.museum", + "minnesota.museum", + "missile.museum", + "missoula.museum", + "modern.museum", + "moma.museum", + "money.museum", + "monmouth.museum", + "monticello.museum", + "montreal.museum", + "moscow.museum", + "motorcycle.museum", + "muenchen.museum", + "muenster.museum", + "mulhouse.museum", + "muncie.museum", + "museet.museum", + "museumcenter.museum", + "museumvereniging.museum", + "music.museum", + "national.museum", + "nationalfirearms.museum", + "nationalheritage.museum", + "nativeamerican.museum", + "naturalhistory.museum", + "naturalhistorymuseum.museum", + "naturalsciences.museum", + "nature.museum", + "naturhistorisches.museum", + "natuurwetenschappen.museum", + "naumburg.museum", + "naval.museum", + "nebraska.museum", + "neues.museum", + "newhampshire.museum", + "newjersey.museum", + "newmexico.museum", + "newport.museum", + "newspaper.museum", + "newyork.museum", + "niepce.museum", + "norfolk.museum", + "north.museum", + "nrw.museum", + "nuernberg.museum", + "nuremberg.museum", + "nyc.museum", + "nyny.museum", + "oceanographic.museum", + "oceanographique.museum", + "omaha.museum", + "online.museum", + "ontario.museum", + "openair.museum", + "oregon.museum", + "oregontrail.museum", + "otago.museum", + "oxford.museum", + "pacific.museum", + "paderborn.museum", + "palace.museum", + "paleo.museum", + "palmsprings.museum", + "panama.museum", + "paris.museum", + "pasadena.museum", + "pharmacy.museum", + "philadelphia.museum", + "philadelphiaarea.museum", + "philately.museum", + "phoenix.museum", + "photography.museum", + "pilots.museum", + "pittsburgh.museum", + "planetarium.museum", + "plantation.museum", + "plants.museum", + "plaza.museum", + "portal.museum", + "portland.museum", + "portlligat.museum", + "posts-and-telecommunications.museum", + "preservation.museum", + "presidio.museum", + "press.museum", + "project.museum", + "public.museum", + "pubol.museum", + "quebec.museum", + "railroad.museum", + "railway.museum", + "research.museum", + "resistance.museum", + "riodejaneiro.museum", + "rochester.museum", + "rockart.museum", + "roma.museum", + "russia.museum", + "saintlouis.museum", + "salem.museum", + "salvadordali.museum", 
+ "salzburg.museum", + "sandiego.museum", + "sanfrancisco.museum", + "santabarbara.museum", + "santacruz.museum", + "santafe.museum", + "saskatchewan.museum", + "satx.museum", + "savannahga.museum", + "schlesisches.museum", + "schoenbrunn.museum", + "schokoladen.museum", + "school.museum", + "schweiz.museum", + "science.museum", + "scienceandhistory.museum", + "scienceandindustry.museum", + "sciencecenter.museum", + "sciencecenters.museum", + "science-fiction.museum", + "sciencehistory.museum", + "sciences.museum", + "sciencesnaturelles.museum", + "scotland.museum", + "seaport.museum", + "settlement.museum", + "settlers.museum", + "shell.museum", + "sherbrooke.museum", + "sibenik.museum", + "silk.museum", + "ski.museum", + "skole.museum", + "society.museum", + "sologne.museum", + "soundandvision.museum", + "southcarolina.museum", + "southwest.museum", + "space.museum", + "spy.museum", + "square.museum", + "stadt.museum", + "stalbans.museum", + "starnberg.museum", + "state.museum", + "stateofdelaware.museum", + "station.museum", + "steam.museum", + "steiermark.museum", + "stjohn.museum", + "stockholm.museum", + "stpetersburg.museum", + "stuttgart.museum", + "suisse.museum", + "surgeonshall.museum", + "surrey.museum", + "svizzera.museum", + "sweden.museum", + "sydney.museum", + "tank.museum", + "tcm.museum", + "technology.museum", + "telekommunikation.museum", + "television.museum", + "texas.museum", + "textile.museum", + "theater.museum", + "time.museum", + "timekeeping.museum", + "topology.museum", + "torino.museum", + "touch.museum", + "town.museum", + "transport.museum", + "tree.museum", + "trolley.museum", + "trust.museum", + "trustee.museum", + "uhren.museum", + "ulm.museum", + "undersea.museum", + "university.museum", + "usa.museum", + "usantiques.museum", + "usarts.museum", + "uscountryestate.museum", + "usculture.museum", + "usdecorativearts.museum", + "usgarden.museum", + "ushistory.museum", + "ushuaia.museum", + "uslivinghistory.museum", + "utah.museum", + "uvic.museum", + "valley.museum", + "vantaa.museum", + "versailles.museum", + "viking.museum", + "village.museum", + "virginia.museum", + "virtual.museum", + "virtuel.museum", + "vlaanderen.museum", + "volkenkunde.museum", + "wales.museum", + "wallonie.museum", + "war.museum", + "washingtondc.museum", + "watchandclock.museum", + "watch-and-clock.museum", + "western.museum", + "westfalen.museum", + "whaling.museum", + "wildlife.museum", + "williamsburg.museum", + "windmill.museum", + "workshop.museum", + "york.museum", + "yorkshire.museum", + "yosemite.museum", + "youth.museum", + "zoological.museum", + "zoology.museum", + "xn--9dbhblg6di.museum", + "xn--h1aegh.museum", + "mv", + "aero.mv", + "biz.mv", + "com.mv", + "coop.mv", + "edu.mv", + "gov.mv", + "info.mv", + "int.mv", + "mil.mv", + "museum.mv", + "name.mv", + "net.mv", + "org.mv", + "pro.mv", + "mw", + "ac.mw", + "biz.mw", + "co.mw", + "com.mw", + "coop.mw", + "edu.mw", + "gov.mw", + "int.mw", + "museum.mw", + "net.mw", + "org.mw", + "mx", + "com.mx", + "org.mx", + "gob.mx", + "edu.mx", + "net.mx", + "my", + "com.my", + "net.my", + "org.my", + "gov.my", + "edu.my", + "mil.my", + "name.my", + "*.mz", + "!teledata.mz", + "na", + "info.na", + "pro.na", + "name.na", + "school.na", + "or.na", + "dr.na", + "us.na", + "mx.na", + "ca.na", + "in.na", + "cc.na", + "tv.na", + "ws.na", + "mobi.na", + "co.na", + "com.na", + "org.na", + "name", + "nc", + "asso.nc", + "ne", + "net", + "nf", + "com.nf", + "net.nf", + "per.nf", + "rec.nf", + "web.nf", + "arts.nf", + "firm.nf", + "info.nf", 
+ "other.nf", + "store.nf", + "ng", + "com.ng", + "edu.ng", + "gov.ng", + "i.ng", + "mil.ng", + "mobi.ng", + "name.ng", + "net.ng", + "org.ng", + "sch.ng", + "com.ni", + "gob.ni", + "edu.ni", + "org.ni", + "nom.ni", + "net.ni", + "mil.ni", + "co.ni", + "biz.ni", + "web.ni", + "int.ni", + "ac.ni", + "in.ni", + "info.ni", + "nl", + "bv.nl", + "no", + "fhs.no", + "vgs.no", + "fylkesbibl.no", + "folkebibl.no", + "museum.no", + "idrett.no", + "priv.no", + "mil.no", + "stat.no", + "dep.no", + "kommune.no", + "herad.no", + "aa.no", + "ah.no", + "bu.no", + "fm.no", + "hl.no", + "hm.no", + "jan-mayen.no", + "mr.no", + "nl.no", + "nt.no", + "of.no", + "ol.no", + "oslo.no", + "rl.no", + "sf.no", + "st.no", + "svalbard.no", + "tm.no", + "tr.no", + "va.no", + "vf.no", + "gs.aa.no", + "gs.ah.no", + "gs.bu.no", + "gs.fm.no", + "gs.hl.no", + "gs.hm.no", + "gs.jan-mayen.no", + "gs.mr.no", + "gs.nl.no", + "gs.nt.no", + "gs.of.no", + "gs.ol.no", + "gs.oslo.no", + "gs.rl.no", + "gs.sf.no", + "gs.st.no", + "gs.svalbard.no", + "gs.tm.no", + "gs.tr.no", + "gs.va.no", + "gs.vf.no", + "akrehamn.no", + "xn--krehamn-dxa.no", + "algard.no", + "xn--lgrd-poac.no", + "arna.no", + "brumunddal.no", + "bryne.no", + "bronnoysund.no", + "xn--brnnysund-m8ac.no", + "drobak.no", + "xn--drbak-wua.no", + "egersund.no", + "fetsund.no", + "floro.no", + "xn--flor-jra.no", + "fredrikstad.no", + "hokksund.no", + "honefoss.no", + "xn--hnefoss-q1a.no", + "jessheim.no", + "jorpeland.no", + "xn--jrpeland-54a.no", + "kirkenes.no", + "kopervik.no", + "krokstadelva.no", + "langevag.no", + "xn--langevg-jxa.no", + "leirvik.no", + "mjondalen.no", + "xn--mjndalen-64a.no", + "mo-i-rana.no", + "mosjoen.no", + "xn--mosjen-eya.no", + "nesoddtangen.no", + "orkanger.no", + "osoyro.no", + "xn--osyro-wua.no", + "raholt.no", + "xn--rholt-mra.no", + "sandnessjoen.no", + "xn--sandnessjen-ogb.no", + "skedsmokorset.no", + "slattum.no", + "spjelkavik.no", + "stathelle.no", + "stavern.no", + "stjordalshalsen.no", + "xn--stjrdalshalsen-sqb.no", + "tananger.no", + "tranby.no", + "vossevangen.no", + "afjord.no", + "xn--fjord-lra.no", + "agdenes.no", + "al.no", + "xn--l-1fa.no", + "alesund.no", + "xn--lesund-hua.no", + "alstahaug.no", + "alta.no", + "xn--lt-liac.no", + "alaheadju.no", + "xn--laheadju-7ya.no", + "alvdal.no", + "amli.no", + "xn--mli-tla.no", + "amot.no", + "xn--mot-tla.no", + "andebu.no", + "andoy.no", + "xn--andy-ira.no", + "andasuolo.no", + "ardal.no", + "xn--rdal-poa.no", + "aremark.no", + "arendal.no", + "xn--s-1fa.no", + "aseral.no", + "xn--seral-lra.no", + "asker.no", + "askim.no", + "askvoll.no", + "askoy.no", + "xn--asky-ira.no", + "asnes.no", + "xn--snes-poa.no", + "audnedaln.no", + "aukra.no", + "aure.no", + "aurland.no", + "aurskog-holand.no", + "xn--aurskog-hland-jnb.no", + "austevoll.no", + "austrheim.no", + "averoy.no", + "xn--avery-yua.no", + "balestrand.no", + "ballangen.no", + "balat.no", + "xn--blt-elab.no", + "balsfjord.no", + "bahccavuotna.no", + "xn--bhccavuotna-k7a.no", + "bamble.no", + "bardu.no", + "beardu.no", + "beiarn.no", + "bajddar.no", + "xn--bjddar-pta.no", + "baidar.no", + "xn--bidr-5nac.no", + "berg.no", + "bergen.no", + "berlevag.no", + "xn--berlevg-jxa.no", + "bearalvahki.no", + "xn--bearalvhki-y4a.no", + "bindal.no", + "birkenes.no", + "bjarkoy.no", + "xn--bjarky-fya.no", + "bjerkreim.no", + "bjugn.no", + "bodo.no", + "xn--bod-2na.no", + "badaddja.no", + "xn--bdddj-mrabd.no", + "budejju.no", + "bokn.no", + "bremanger.no", + "bronnoy.no", + "xn--brnny-wuac.no", + "bygland.no", + "bykle.no", + "barum.no", + 
"xn--brum-voa.no", + "bo.telemark.no", + "xn--b-5ga.telemark.no", + "bo.nordland.no", + "xn--b-5ga.nordland.no", + "bievat.no", + "xn--bievt-0qa.no", + "bomlo.no", + "xn--bmlo-gra.no", + "batsfjord.no", + "xn--btsfjord-9za.no", + "bahcavuotna.no", + "xn--bhcavuotna-s4a.no", + "dovre.no", + "drammen.no", + "drangedal.no", + "dyroy.no", + "xn--dyry-ira.no", + "donna.no", + "xn--dnna-gra.no", + "eid.no", + "eidfjord.no", + "eidsberg.no", + "eidskog.no", + "eidsvoll.no", + "eigersund.no", + "elverum.no", + "enebakk.no", + "engerdal.no", + "etne.no", + "etnedal.no", + "evenes.no", + "evenassi.no", + "xn--eveni-0qa01ga.no", + "evje-og-hornnes.no", + "farsund.no", + "fauske.no", + "fuossko.no", + "fuoisku.no", + "fedje.no", + "fet.no", + "finnoy.no", + "xn--finny-yua.no", + "fitjar.no", + "fjaler.no", + "fjell.no", + "flakstad.no", + "flatanger.no", + "flekkefjord.no", + "flesberg.no", + "flora.no", + "fla.no", + "xn--fl-zia.no", + "folldal.no", + "forsand.no", + "fosnes.no", + "frei.no", + "frogn.no", + "froland.no", + "frosta.no", + "frana.no", + "xn--frna-woa.no", + "froya.no", + "xn--frya-hra.no", + "fusa.no", + "fyresdal.no", + "forde.no", + "xn--frde-gra.no", + "gamvik.no", + "gangaviika.no", + "xn--ggaviika-8ya47h.no", + "gaular.no", + "gausdal.no", + "gildeskal.no", + "xn--gildeskl-g0a.no", + "giske.no", + "gjemnes.no", + "gjerdrum.no", + "gjerstad.no", + "gjesdal.no", + "gjovik.no", + "xn--gjvik-wua.no", + "gloppen.no", + "gol.no", + "gran.no", + "grane.no", + "granvin.no", + "gratangen.no", + "grimstad.no", + "grong.no", + "kraanghke.no", + "xn--kranghke-b0a.no", + "grue.no", + "gulen.no", + "hadsel.no", + "halden.no", + "halsa.no", + "hamar.no", + "hamaroy.no", + "habmer.no", + "xn--hbmer-xqa.no", + "hapmir.no", + "xn--hpmir-xqa.no", + "hammerfest.no", + "hammarfeasta.no", + "xn--hmmrfeasta-s4ac.no", + "haram.no", + "hareid.no", + "harstad.no", + "hasvik.no", + "aknoluokta.no", + "xn--koluokta-7ya57h.no", + "hattfjelldal.no", + "aarborte.no", + "haugesund.no", + "hemne.no", + "hemnes.no", + "hemsedal.no", + "heroy.more-og-romsdal.no", + "xn--hery-ira.xn--mre-og-romsdal-qqb.no", + "heroy.nordland.no", + "xn--hery-ira.nordland.no", + "hitra.no", + "hjartdal.no", + "hjelmeland.no", + "hobol.no", + "xn--hobl-ira.no", + "hof.no", + "hol.no", + "hole.no", + "holmestrand.no", + "holtalen.no", + "xn--holtlen-hxa.no", + "hornindal.no", + "horten.no", + "hurdal.no", + "hurum.no", + "hvaler.no", + "hyllestad.no", + "hagebostad.no", + "xn--hgebostad-g3a.no", + "hoyanger.no", + "xn--hyanger-q1a.no", + "hoylandet.no", + "xn--hylandet-54a.no", + "ha.no", + "xn--h-2fa.no", + "ibestad.no", + "inderoy.no", + "xn--indery-fya.no", + "iveland.no", + "jevnaker.no", + "jondal.no", + "jolster.no", + "xn--jlster-bya.no", + "karasjok.no", + "karasjohka.no", + "xn--krjohka-hwab49j.no", + "karlsoy.no", + "galsa.no", + "xn--gls-elac.no", + "karmoy.no", + "xn--karmy-yua.no", + "kautokeino.no", + "guovdageaidnu.no", + "klepp.no", + "klabu.no", + "xn--klbu-woa.no", + "kongsberg.no", + "kongsvinger.no", + "kragero.no", + "xn--krager-gya.no", + "kristiansand.no", + "kristiansund.no", + "krodsherad.no", + "xn--krdsherad-m8a.no", + "kvalsund.no", + "rahkkeravju.no", + "xn--rhkkervju-01af.no", + "kvam.no", + "kvinesdal.no", + "kvinnherad.no", + "kviteseid.no", + "kvitsoy.no", + "xn--kvitsy-fya.no", + "kvafjord.no", + "xn--kvfjord-nxa.no", + "giehtavuoatna.no", + "kvanangen.no", + "xn--kvnangen-k0a.no", + "navuotna.no", + "xn--nvuotna-hwa.no", + "kafjord.no", + "xn--kfjord-iua.no", + "gaivuotna.no", + 
"xn--givuotna-8ya.no", + "larvik.no", + "lavangen.no", + "lavagis.no", + "loabat.no", + "xn--loabt-0qa.no", + "lebesby.no", + "davvesiida.no", + "leikanger.no", + "leirfjord.no", + "leka.no", + "leksvik.no", + "lenvik.no", + "leangaviika.no", + "xn--leagaviika-52b.no", + "lesja.no", + "levanger.no", + "lier.no", + "lierne.no", + "lillehammer.no", + "lillesand.no", + "lindesnes.no", + "lindas.no", + "xn--linds-pra.no", + "lom.no", + "loppa.no", + "lahppi.no", + "xn--lhppi-xqa.no", + "lund.no", + "lunner.no", + "luroy.no", + "xn--lury-ira.no", + "luster.no", + "lyngdal.no", + "lyngen.no", + "ivgu.no", + "lardal.no", + "lerdal.no", + "xn--lrdal-sra.no", + "lodingen.no", + "xn--ldingen-q1a.no", + "lorenskog.no", + "xn--lrenskog-54a.no", + "loten.no", + "xn--lten-gra.no", + "malvik.no", + "masoy.no", + "xn--msy-ula0h.no", + "muosat.no", + "xn--muost-0qa.no", + "mandal.no", + "marker.no", + "marnardal.no", + "masfjorden.no", + "meland.no", + "meldal.no", + "melhus.no", + "meloy.no", + "xn--mely-ira.no", + "meraker.no", + "xn--merker-kua.no", + "moareke.no", + "xn--moreke-jua.no", + "midsund.no", + "midtre-gauldal.no", + "modalen.no", + "modum.no", + "molde.no", + "moskenes.no", + "moss.no", + "mosvik.no", + "malselv.no", + "xn--mlselv-iua.no", + "malatvuopmi.no", + "xn--mlatvuopmi-s4a.no", + "namdalseid.no", + "aejrie.no", + "namsos.no", + "namsskogan.no", + "naamesjevuemie.no", + "xn--nmesjevuemie-tcba.no", + "laakesvuemie.no", + "nannestad.no", + "narvik.no", + "narviika.no", + "naustdal.no", + "nedre-eiker.no", + "nes.akershus.no", + "nes.buskerud.no", + "nesna.no", + "nesodden.no", + "nesseby.no", + "unjarga.no", + "xn--unjrga-rta.no", + "nesset.no", + "nissedal.no", + "nittedal.no", + "nord-aurdal.no", + "nord-fron.no", + "nord-odal.no", + "norddal.no", + "nordkapp.no", + "davvenjarga.no", + "xn--davvenjrga-y4a.no", + "nordre-land.no", + "nordreisa.no", + "raisa.no", + "xn--risa-5na.no", + "nore-og-uvdal.no", + "notodden.no", + "naroy.no", + "xn--nry-yla5g.no", + "notteroy.no", + "xn--nttery-byae.no", + "odda.no", + "oksnes.no", + "xn--ksnes-uua.no", + "oppdal.no", + "oppegard.no", + "xn--oppegrd-ixa.no", + "orkdal.no", + "orland.no", + "xn--rland-uua.no", + "orskog.no", + "xn--rskog-uua.no", + "orsta.no", + "xn--rsta-fra.no", + "os.hedmark.no", + "os.hordaland.no", + "osen.no", + "osteroy.no", + "xn--ostery-fya.no", + "ostre-toten.no", + "xn--stre-toten-zcb.no", + "overhalla.no", + "ovre-eiker.no", + "xn--vre-eiker-k8a.no", + "oyer.no", + "xn--yer-zna.no", + "oygarden.no", + "xn--ygarden-p1a.no", + "oystre-slidre.no", + "xn--ystre-slidre-ujb.no", + "porsanger.no", + "porsangu.no", + "xn--porsgu-sta26f.no", + "porsgrunn.no", + "radoy.no", + "xn--rady-ira.no", + "rakkestad.no", + "rana.no", + "ruovat.no", + "randaberg.no", + "rauma.no", + "rendalen.no", + "rennebu.no", + "rennesoy.no", + "xn--rennesy-v1a.no", + "rindal.no", + "ringebu.no", + "ringerike.no", + "ringsaker.no", + "rissa.no", + "risor.no", + "xn--risr-ira.no", + "roan.no", + "rollag.no", + "rygge.no", + "ralingen.no", + "xn--rlingen-mxa.no", + "rodoy.no", + "xn--rdy-0nab.no", + "romskog.no", + "xn--rmskog-bya.no", + "roros.no", + "xn--rros-gra.no", + "rost.no", + "xn--rst-0na.no", + "royken.no", + "xn--ryken-vua.no", + "royrvik.no", + "xn--ryrvik-bya.no", + "rade.no", + "xn--rde-ula.no", + "salangen.no", + "siellak.no", + "saltdal.no", + "salat.no", + "xn--slt-elab.no", + "xn--slat-5na.no", + "samnanger.no", + "sande.more-og-romsdal.no", + "sande.xn--mre-og-romsdal-qqb.no", + "sande.vestfold.no", + "sandefjord.no", + 
"sandnes.no", + "sandoy.no", + "xn--sandy-yua.no", + "sarpsborg.no", + "sauda.no", + "sauherad.no", + "sel.no", + "selbu.no", + "selje.no", + "seljord.no", + "sigdal.no", + "siljan.no", + "sirdal.no", + "skaun.no", + "skedsmo.no", + "ski.no", + "skien.no", + "skiptvet.no", + "skjervoy.no", + "xn--skjervy-v1a.no", + "skierva.no", + "xn--skierv-uta.no", + "skjak.no", + "xn--skjk-soa.no", + "skodje.no", + "skanland.no", + "xn--sknland-fxa.no", + "skanit.no", + "xn--sknit-yqa.no", + "smola.no", + "xn--smla-hra.no", + "snillfjord.no", + "snasa.no", + "xn--snsa-roa.no", + "snoasa.no", + "snaase.no", + "xn--snase-nra.no", + "sogndal.no", + "sokndal.no", + "sola.no", + "solund.no", + "songdalen.no", + "sortland.no", + "spydeberg.no", + "stange.no", + "stavanger.no", + "steigen.no", + "steinkjer.no", + "stjordal.no", + "xn--stjrdal-s1a.no", + "stokke.no", + "stor-elvdal.no", + "stord.no", + "stordal.no", + "storfjord.no", + "omasvuotna.no", + "strand.no", + "stranda.no", + "stryn.no", + "sula.no", + "suldal.no", + "sund.no", + "sunndal.no", + "surnadal.no", + "sveio.no", + "svelvik.no", + "sykkylven.no", + "sogne.no", + "xn--sgne-gra.no", + "somna.no", + "xn--smna-gra.no", + "sondre-land.no", + "xn--sndre-land-0cb.no", + "sor-aurdal.no", + "xn--sr-aurdal-l8a.no", + "sor-fron.no", + "xn--sr-fron-q1a.no", + "sor-odal.no", + "xn--sr-odal-q1a.no", + "sor-varanger.no", + "xn--sr-varanger-ggb.no", + "matta-varjjat.no", + "xn--mtta-vrjjat-k7af.no", + "sorfold.no", + "xn--srfold-bya.no", + "sorreisa.no", + "xn--srreisa-q1a.no", + "sorum.no", + "xn--srum-gra.no", + "tana.no", + "deatnu.no", + "time.no", + "tingvoll.no", + "tinn.no", + "tjeldsund.no", + "dielddanuorri.no", + "tjome.no", + "xn--tjme-hra.no", + "tokke.no", + "tolga.no", + "torsken.no", + "tranoy.no", + "xn--trany-yua.no", + "tromso.no", + "xn--troms-zua.no", + "tromsa.no", + "romsa.no", + "trondheim.no", + "troandin.no", + "trysil.no", + "trana.no", + "xn--trna-woa.no", + "trogstad.no", + "xn--trgstad-r1a.no", + "tvedestrand.no", + "tydal.no", + "tynset.no", + "tysfjord.no", + "divtasvuodna.no", + "divttasvuotna.no", + "tysnes.no", + "tysvar.no", + "xn--tysvr-vra.no", + "tonsberg.no", + "xn--tnsberg-q1a.no", + "ullensaker.no", + "ullensvang.no", + "ulvik.no", + "utsira.no", + "vadso.no", + "xn--vads-jra.no", + "cahcesuolo.no", + "xn--hcesuolo-7ya35b.no", + "vaksdal.no", + "valle.no", + "vang.no", + "vanylven.no", + "vardo.no", + "xn--vard-jra.no", + "varggat.no", + "xn--vrggt-xqad.no", + "vefsn.no", + "vaapste.no", + "vega.no", + "vegarshei.no", + "xn--vegrshei-c0a.no", + "vennesla.no", + "verdal.no", + "verran.no", + "vestby.no", + "vestnes.no", + "vestre-slidre.no", + "vestre-toten.no", + "vestvagoy.no", + "xn--vestvgy-ixa6o.no", + "vevelstad.no", + "vik.no", + "vikna.no", + "vindafjord.no", + "volda.no", + "voss.no", + "varoy.no", + "xn--vry-yla5g.no", + "vagan.no", + "xn--vgan-qoa.no", + "voagat.no", + "vagsoy.no", + "xn--vgsy-qoa0j.no", + "vaga.no", + "xn--vg-yiab.no", + "valer.ostfold.no", + "xn--vler-qoa.xn--stfold-9xa.no", + "valer.hedmark.no", + "xn--vler-qoa.hedmark.no", + "*.np", + "nr", + "biz.nr", + "info.nr", + "gov.nr", + "edu.nr", + "org.nr", + "net.nr", + "com.nr", + "nu", + "nz", + "ac.nz", + "co.nz", + "cri.nz", + "geek.nz", + "gen.nz", + "govt.nz", + "health.nz", + "iwi.nz", + "kiwi.nz", + "maori.nz", + "mil.nz", + "xn--mori-qsa.nz", + "net.nz", + "org.nz", + "parliament.nz", + "school.nz", + "om", + "co.om", + "com.om", + "edu.om", + "gov.om", + "med.om", + "museum.om", + "net.om", + "org.om", + "pro.om", + "org", + "pa", 
+ "ac.pa", + "gob.pa", + "com.pa", + "org.pa", + "sld.pa", + "edu.pa", + "net.pa", + "ing.pa", + "abo.pa", + "med.pa", + "nom.pa", + "pe", + "edu.pe", + "gob.pe", + "nom.pe", + "mil.pe", + "org.pe", + "com.pe", + "net.pe", + "pf", + "com.pf", + "org.pf", + "edu.pf", + "*.pg", + "ph", + "com.ph", + "net.ph", + "org.ph", + "gov.ph", + "edu.ph", + "ngo.ph", + "mil.ph", + "i.ph", + "pk", + "com.pk", + "net.pk", + "edu.pk", + "org.pk", + "fam.pk", + "biz.pk", + "web.pk", + "gov.pk", + "gob.pk", + "gok.pk", + "gon.pk", + "gop.pk", + "gos.pk", + "info.pk", + "pl", + "com.pl", + "net.pl", + "org.pl", + "aid.pl", + "agro.pl", + "atm.pl", + "auto.pl", + "biz.pl", + "edu.pl", + "gmina.pl", + "gsm.pl", + "info.pl", + "mail.pl", + "miasta.pl", + "media.pl", + "mil.pl", + "nieruchomosci.pl", + "nom.pl", + "pc.pl", + "powiat.pl", + "priv.pl", + "realestate.pl", + "rel.pl", + "sex.pl", + "shop.pl", + "sklep.pl", + "sos.pl", + "szkola.pl", + "targi.pl", + "tm.pl", + "tourism.pl", + "travel.pl", + "turystyka.pl", + "gov.pl", + "ap.gov.pl", + "ic.gov.pl", + "is.gov.pl", + "us.gov.pl", + "kmpsp.gov.pl", + "kppsp.gov.pl", + "kwpsp.gov.pl", + "psp.gov.pl", + "wskr.gov.pl", + "kwp.gov.pl", + "mw.gov.pl", + "ug.gov.pl", + "um.gov.pl", + "umig.gov.pl", + "ugim.gov.pl", + "upow.gov.pl", + "uw.gov.pl", + "starostwo.gov.pl", + "pa.gov.pl", + "po.gov.pl", + "psse.gov.pl", + "pup.gov.pl", + "rzgw.gov.pl", + "sa.gov.pl", + "so.gov.pl", + "sr.gov.pl", + "wsa.gov.pl", + "sko.gov.pl", + "uzs.gov.pl", + "wiih.gov.pl", + "winb.gov.pl", + "pinb.gov.pl", + "wios.gov.pl", + "witd.gov.pl", + "wzmiuw.gov.pl", + "piw.gov.pl", + "wiw.gov.pl", + "griw.gov.pl", + "wif.gov.pl", + "oum.gov.pl", + "sdn.gov.pl", + "zp.gov.pl", + "uppo.gov.pl", + "mup.gov.pl", + "wuoz.gov.pl", + "konsulat.gov.pl", + "oirm.gov.pl", + "augustow.pl", + "babia-gora.pl", + "bedzin.pl", + "beskidy.pl", + "bialowieza.pl", + "bialystok.pl", + "bielawa.pl", + "bieszczady.pl", + "boleslawiec.pl", + "bydgoszcz.pl", + "bytom.pl", + "cieszyn.pl", + "czeladz.pl", + "czest.pl", + "dlugoleka.pl", + "elblag.pl", + "elk.pl", + "glogow.pl", + "gniezno.pl", + "gorlice.pl", + "grajewo.pl", + "ilawa.pl", + "jaworzno.pl", + "jelenia-gora.pl", + "jgora.pl", + "kalisz.pl", + "kazimierz-dolny.pl", + "karpacz.pl", + "kartuzy.pl", + "kaszuby.pl", + "katowice.pl", + "kepno.pl", + "ketrzyn.pl", + "klodzko.pl", + "kobierzyce.pl", + "kolobrzeg.pl", + "konin.pl", + "konskowola.pl", + "kutno.pl", + "lapy.pl", + "lebork.pl", + "legnica.pl", + "lezajsk.pl", + "limanowa.pl", + "lomza.pl", + "lowicz.pl", + "lubin.pl", + "lukow.pl", + "malbork.pl", + "malopolska.pl", + "mazowsze.pl", + "mazury.pl", + "mielec.pl", + "mielno.pl", + "mragowo.pl", + "naklo.pl", + "nowaruda.pl", + "nysa.pl", + "olawa.pl", + "olecko.pl", + "olkusz.pl", + "olsztyn.pl", + "opoczno.pl", + "opole.pl", + "ostroda.pl", + "ostroleka.pl", + "ostrowiec.pl", + "ostrowwlkp.pl", + "pila.pl", + "pisz.pl", + "podhale.pl", + "podlasie.pl", + "polkowice.pl", + "pomorze.pl", + "pomorskie.pl", + "prochowice.pl", + "pruszkow.pl", + "przeworsk.pl", + "pulawy.pl", + "radom.pl", + "rawa-maz.pl", + "rybnik.pl", + "rzeszow.pl", + "sanok.pl", + "sejny.pl", + "slask.pl", + "slupsk.pl", + "sosnowiec.pl", + "stalowa-wola.pl", + "skoczow.pl", + "starachowice.pl", + "stargard.pl", + "suwalki.pl", + "swidnica.pl", + "swiebodzin.pl", + "swinoujscie.pl", + "szczecin.pl", + "szczytno.pl", + "tarnobrzeg.pl", + "tgory.pl", + "turek.pl", + "tychy.pl", + "ustka.pl", + "walbrzych.pl", + "warmia.pl", + "warszawa.pl", + "waw.pl", + "wegrow.pl", + 
"wielun.pl", + "wlocl.pl", + "wloclawek.pl", + "wodzislaw.pl", + "wolomin.pl", + "wroclaw.pl", + "zachpomor.pl", + "zagan.pl", + "zarow.pl", + "zgora.pl", + "zgorzelec.pl", + "pm", + "pn", + "gov.pn", + "co.pn", + "org.pn", + "edu.pn", + "net.pn", + "post", + "pr", + "com.pr", + "net.pr", + "org.pr", + "gov.pr", + "edu.pr", + "isla.pr", + "pro.pr", + "biz.pr", + "info.pr", + "name.pr", + "est.pr", + "prof.pr", + "ac.pr", + "pro", + "aaa.pro", + "aca.pro", + "acct.pro", + "avocat.pro", + "bar.pro", + "cpa.pro", + "eng.pro", + "jur.pro", + "law.pro", + "med.pro", + "recht.pro", + "ps", + "edu.ps", + "gov.ps", + "sec.ps", + "plo.ps", + "com.ps", + "org.ps", + "net.ps", + "pt", + "net.pt", + "gov.pt", + "org.pt", + "edu.pt", + "int.pt", + "publ.pt", + "com.pt", + "nome.pt", + "pw", + "co.pw", + "ne.pw", + "or.pw", + "ed.pw", + "go.pw", + "belau.pw", + "py", + "com.py", + "coop.py", + "edu.py", + "gov.py", + "mil.py", + "net.py", + "org.py", + "qa", + "com.qa", + "edu.qa", + "gov.qa", + "mil.qa", + "name.qa", + "net.qa", + "org.qa", + "sch.qa", + "re", + "asso.re", + "com.re", + "nom.re", + "ro", + "arts.ro", + "com.ro", + "firm.ro", + "info.ro", + "nom.ro", + "nt.ro", + "org.ro", + "rec.ro", + "store.ro", + "tm.ro", + "www.ro", + "rs", + "ac.rs", + "co.rs", + "edu.rs", + "gov.rs", + "in.rs", + "org.rs", + "ru", + "ac.ru", + "com.ru", + "edu.ru", + "int.ru", + "net.ru", + "org.ru", + "pp.ru", + "adygeya.ru", + "altai.ru", + "amur.ru", + "arkhangelsk.ru", + "astrakhan.ru", + "bashkiria.ru", + "belgorod.ru", + "bir.ru", + "bryansk.ru", + "buryatia.ru", + "cbg.ru", + "chel.ru", + "chelyabinsk.ru", + "chita.ru", + "chukotka.ru", + "chuvashia.ru", + "dagestan.ru", + "dudinka.ru", + "e-burg.ru", + "grozny.ru", + "irkutsk.ru", + "ivanovo.ru", + "izhevsk.ru", + "jar.ru", + "joshkar-ola.ru", + "kalmykia.ru", + "kaluga.ru", + "kamchatka.ru", + "karelia.ru", + "kazan.ru", + "kchr.ru", + "kemerovo.ru", + "khabarovsk.ru", + "khakassia.ru", + "khv.ru", + "kirov.ru", + "koenig.ru", + "komi.ru", + "kostroma.ru", + "krasnoyarsk.ru", + "kuban.ru", + "kurgan.ru", + "kursk.ru", + "lipetsk.ru", + "magadan.ru", + "mari.ru", + "mari-el.ru", + "marine.ru", + "mordovia.ru", + "msk.ru", + "murmansk.ru", + "nalchik.ru", + "nnov.ru", + "nov.ru", + "novosibirsk.ru", + "nsk.ru", + "omsk.ru", + "orenburg.ru", + "oryol.ru", + "palana.ru", + "penza.ru", + "perm.ru", + "ptz.ru", + "rnd.ru", + "ryazan.ru", + "sakhalin.ru", + "samara.ru", + "saratov.ru", + "simbirsk.ru", + "smolensk.ru", + "spb.ru", + "stavropol.ru", + "stv.ru", + "surgut.ru", + "tambov.ru", + "tatarstan.ru", + "tom.ru", + "tomsk.ru", + "tsaritsyn.ru", + "tsk.ru", + "tula.ru", + "tuva.ru", + "tver.ru", + "tyumen.ru", + "udm.ru", + "udmurtia.ru", + "ulan-ude.ru", + "vladikavkaz.ru", + "vladimir.ru", + "vladivostok.ru", + "volgograd.ru", + "vologda.ru", + "voronezh.ru", + "vrn.ru", + "vyatka.ru", + "yakutia.ru", + "yamal.ru", + "yaroslavl.ru", + "yekaterinburg.ru", + "yuzhno-sakhalinsk.ru", + "amursk.ru", + "baikal.ru", + "cmw.ru", + "fareast.ru", + "jamal.ru", + "kms.ru", + "k-uralsk.ru", + "kustanai.ru", + "kuzbass.ru", + "mytis.ru", + "nakhodka.ru", + "nkz.ru", + "norilsk.ru", + "oskol.ru", + "pyatigorsk.ru", + "rubtsovsk.ru", + "snz.ru", + "syzran.ru", + "vdonsk.ru", + "zgrad.ru", + "gov.ru", + "mil.ru", + "test.ru", + "rw", + "gov.rw", + "net.rw", + "edu.rw", + "ac.rw", + "com.rw", + "co.rw", + "int.rw", + "mil.rw", + "gouv.rw", + "sa", + "com.sa", + "net.sa", + "org.sa", + "gov.sa", + "med.sa", + "pub.sa", + "edu.sa", + "sch.sa", + "sb", + "com.sb", + 
"edu.sb", + "gov.sb", + "net.sb", + "org.sb", + "sc", + "com.sc", + "gov.sc", + "net.sc", + "org.sc", + "edu.sc", + "sd", + "com.sd", + "net.sd", + "org.sd", + "edu.sd", + "med.sd", + "tv.sd", + "gov.sd", + "info.sd", + "se", + "a.se", + "ac.se", + "b.se", + "bd.se", + "brand.se", + "c.se", + "d.se", + "e.se", + "f.se", + "fh.se", + "fhsk.se", + "fhv.se", + "g.se", + "h.se", + "i.se", + "k.se", + "komforb.se", + "kommunalforbund.se", + "komvux.se", + "l.se", + "lanbib.se", + "m.se", + "n.se", + "naturbruksgymn.se", + "o.se", + "org.se", + "p.se", + "parti.se", + "pp.se", + "press.se", + "r.se", + "s.se", + "t.se", + "tm.se", + "u.se", + "w.se", + "x.se", + "y.se", + "z.se", + "sg", + "com.sg", + "net.sg", + "org.sg", + "gov.sg", + "edu.sg", + "per.sg", + "sh", + "com.sh", + "net.sh", + "gov.sh", + "org.sh", + "mil.sh", + "si", + "sj", + "sk", + "sl", + "com.sl", + "net.sl", + "edu.sl", + "gov.sl", + "org.sl", + "sm", + "sn", + "art.sn", + "com.sn", + "edu.sn", + "gouv.sn", + "org.sn", + "perso.sn", + "univ.sn", + "so", + "com.so", + "net.so", + "org.so", + "sr", + "st", + "co.st", + "com.st", + "consulado.st", + "edu.st", + "embaixada.st", + "gov.st", + "mil.st", + "net.st", + "org.st", + "principe.st", + "saotome.st", + "store.st", + "su", + "adygeya.su", + "arkhangelsk.su", + "balashov.su", + "bashkiria.su", + "bryansk.su", + "dagestan.su", + "grozny.su", + "ivanovo.su", + "kalmykia.su", + "kaluga.su", + "karelia.su", + "khakassia.su", + "krasnodar.su", + "kurgan.su", + "lenug.su", + "mordovia.su", + "msk.su", + "murmansk.su", + "nalchik.su", + "nov.su", + "obninsk.su", + "penza.su", + "pokrovsk.su", + "sochi.su", + "spb.su", + "togliatti.su", + "troitsk.su", + "tula.su", + "tuva.su", + "vladikavkaz.su", + "vladimir.su", + "vologda.su", + "sv", + "com.sv", + "edu.sv", + "gob.sv", + "org.sv", + "red.sv", + "sx", + "gov.sx", + "sy", + "edu.sy", + "gov.sy", + "net.sy", + "mil.sy", + "com.sy", + "org.sy", + "sz", + "co.sz", + "ac.sz", + "org.sz", + "tc", + "td", + "tel", + "tf", + "tg", + "th", + "ac.th", + "co.th", + "go.th", + "in.th", + "mi.th", + "net.th", + "or.th", + "tj", + "ac.tj", + "biz.tj", + "co.tj", + "com.tj", + "edu.tj", + "go.tj", + "gov.tj", + "int.tj", + "mil.tj", + "name.tj", + "net.tj", + "nic.tj", + "org.tj", + "test.tj", + "web.tj", + "tk", + "tl", + "gov.tl", + "tm", + "com.tm", + "co.tm", + "org.tm", + "net.tm", + "nom.tm", + "gov.tm", + "mil.tm", + "edu.tm", + "tn", + "com.tn", + "ens.tn", + "fin.tn", + "gov.tn", + "ind.tn", + "intl.tn", + "nat.tn", + "net.tn", + "org.tn", + "info.tn", + "perso.tn", + "tourism.tn", + "edunet.tn", + "rnrt.tn", + "rns.tn", + "rnu.tn", + "mincom.tn", + "agrinet.tn", + "defense.tn", + "turen.tn", + "to", + "com.to", + "gov.to", + "net.to", + "org.to", + "edu.to", + "mil.to", + "tr", + "com.tr", + "info.tr", + "biz.tr", + "net.tr", + "org.tr", + "web.tr", + "gen.tr", + "tv.tr", + "av.tr", + "dr.tr", + "bbs.tr", + "name.tr", + "tel.tr", + "gov.tr", + "bel.tr", + "pol.tr", + "mil.tr", + "k12.tr", + "edu.tr", + "kep.tr", + "nc.tr", + "gov.nc.tr", + "travel", + "tt", + "co.tt", + "com.tt", + "org.tt", + "net.tt", + "biz.tt", + "info.tt", + "pro.tt", + "int.tt", + "coop.tt", + "jobs.tt", + "mobi.tt", + "travel.tt", + "museum.tt", + "aero.tt", + "name.tt", + "gov.tt", + "edu.tt", + "tv", + "tw", + "edu.tw", + "gov.tw", + "mil.tw", + "com.tw", + "net.tw", + "org.tw", + "idv.tw", + "game.tw", + "ebiz.tw", + "club.tw", + "xn--zf0ao64a.tw", + "xn--uc0atv.tw", + "xn--czrw28b.tw", + "tz", + "ac.tz", + "co.tz", + "go.tz", + "hotel.tz", + "info.tz", + 
"me.tz", + "mil.tz", + "mobi.tz", + "ne.tz", + "or.tz", + "sc.tz", + "tv.tz", + "ua", + "com.ua", + "edu.ua", + "gov.ua", + "in.ua", + "net.ua", + "org.ua", + "cherkassy.ua", + "cherkasy.ua", + "chernigov.ua", + "chernihiv.ua", + "chernivtsi.ua", + "chernovtsy.ua", + "ck.ua", + "cn.ua", + "cr.ua", + "crimea.ua", + "cv.ua", + "dn.ua", + "dnepropetrovsk.ua", + "dnipropetrovsk.ua", + "dominic.ua", + "donetsk.ua", + "dp.ua", + "if.ua", + "ivano-frankivsk.ua", + "kh.ua", + "kharkiv.ua", + "kharkov.ua", + "kherson.ua", + "khmelnitskiy.ua", + "khmelnytskyi.ua", + "kiev.ua", + "kirovograd.ua", + "km.ua", + "kr.ua", + "krym.ua", + "ks.ua", + "kv.ua", + "kyiv.ua", + "lg.ua", + "lt.ua", + "lugansk.ua", + "lutsk.ua", + "lv.ua", + "lviv.ua", + "mk.ua", + "mykolaiv.ua", + "nikolaev.ua", + "od.ua", + "odesa.ua", + "odessa.ua", + "pl.ua", + "poltava.ua", + "rivne.ua", + "rovno.ua", + "rv.ua", + "sb.ua", + "sebastopol.ua", + "sevastopol.ua", + "sm.ua", + "sumy.ua", + "te.ua", + "ternopil.ua", + "uz.ua", + "uzhgorod.ua", + "vinnica.ua", + "vinnytsia.ua", + "vn.ua", + "volyn.ua", + "yalta.ua", + "zaporizhzhe.ua", + "zaporizhzhia.ua", + "zhitomir.ua", + "zhytomyr.ua", + "zp.ua", + "zt.ua", + "ug", + "co.ug", + "or.ug", + "ac.ug", + "sc.ug", + "go.ug", + "ne.ug", + "com.ug", + "org.ug", + "uk", + "ac.uk", + "co.uk", + "gov.uk", + "ltd.uk", + "me.uk", + "net.uk", + "nhs.uk", + "org.uk", + "plc.uk", + "police.uk", + "*.sch.uk", + "us", + "dni.us", + "fed.us", + "isa.us", + "kids.us", + "nsn.us", + "ak.us", + "al.us", + "ar.us", + "as.us", + "az.us", + "ca.us", + "co.us", + "ct.us", + "dc.us", + "de.us", + "fl.us", + "ga.us", + "gu.us", + "hi.us", + "ia.us", + "id.us", + "il.us", + "in.us", + "ks.us", + "ky.us", + "la.us", + "ma.us", + "md.us", + "me.us", + "mi.us", + "mn.us", + "mo.us", + "ms.us", + "mt.us", + "nc.us", + "nd.us", + "ne.us", + "nh.us", + "nj.us", + "nm.us", + "nv.us", + "ny.us", + "oh.us", + "ok.us", + "or.us", + "pa.us", + "pr.us", + "ri.us", + "sc.us", + "sd.us", + "tn.us", + "tx.us", + "ut.us", + "vi.us", + "vt.us", + "va.us", + "wa.us", + "wi.us", + "wv.us", + "wy.us", + "k12.ak.us", + "k12.al.us", + "k12.ar.us", + "k12.as.us", + "k12.az.us", + "k12.ca.us", + "k12.co.us", + "k12.ct.us", + "k12.dc.us", + "k12.de.us", + "k12.fl.us", + "k12.ga.us", + "k12.gu.us", + "k12.ia.us", + "k12.id.us", + "k12.il.us", + "k12.in.us", + "k12.ks.us", + "k12.ky.us", + "k12.la.us", + "k12.ma.us", + "k12.md.us", + "k12.me.us", + "k12.mi.us", + "k12.mn.us", + "k12.mo.us", + "k12.ms.us", + "k12.mt.us", + "k12.nc.us", + "k12.ne.us", + "k12.nh.us", + "k12.nj.us", + "k12.nm.us", + "k12.nv.us", + "k12.ny.us", + "k12.oh.us", + "k12.ok.us", + "k12.or.us", + "k12.pa.us", + "k12.pr.us", + "k12.ri.us", + "k12.sc.us", + "k12.tn.us", + "k12.tx.us", + "k12.ut.us", + "k12.vi.us", + "k12.vt.us", + "k12.va.us", + "k12.wa.us", + "k12.wi.us", + "k12.wy.us", + "cc.ak.us", + "cc.al.us", + "cc.ar.us", + "cc.as.us", + "cc.az.us", + "cc.ca.us", + "cc.co.us", + "cc.ct.us", + "cc.dc.us", + "cc.de.us", + "cc.fl.us", + "cc.ga.us", + "cc.gu.us", + "cc.hi.us", + "cc.ia.us", + "cc.id.us", + "cc.il.us", + "cc.in.us", + "cc.ks.us", + "cc.ky.us", + "cc.la.us", + "cc.ma.us", + "cc.md.us", + "cc.me.us", + "cc.mi.us", + "cc.mn.us", + "cc.mo.us", + "cc.ms.us", + "cc.mt.us", + "cc.nc.us", + "cc.nd.us", + "cc.ne.us", + "cc.nh.us", + "cc.nj.us", + "cc.nm.us", + "cc.nv.us", + "cc.ny.us", + "cc.oh.us", + "cc.ok.us", + "cc.or.us", + "cc.pa.us", + "cc.pr.us", + "cc.ri.us", + "cc.sc.us", + "cc.sd.us", + "cc.tn.us", + "cc.tx.us", + "cc.ut.us", + "cc.vi.us", 
+ "cc.vt.us", + "cc.va.us", + "cc.wa.us", + "cc.wi.us", + "cc.wv.us", + "cc.wy.us", + "lib.ak.us", + "lib.al.us", + "lib.ar.us", + "lib.as.us", + "lib.az.us", + "lib.ca.us", + "lib.co.us", + "lib.ct.us", + "lib.dc.us", + "lib.de.us", + "lib.fl.us", + "lib.ga.us", + "lib.gu.us", + "lib.hi.us", + "lib.ia.us", + "lib.id.us", + "lib.il.us", + "lib.in.us", + "lib.ks.us", + "lib.ky.us", + "lib.la.us", + "lib.ma.us", + "lib.md.us", + "lib.me.us", + "lib.mi.us", + "lib.mn.us", + "lib.mo.us", + "lib.ms.us", + "lib.mt.us", + "lib.nc.us", + "lib.nd.us", + "lib.ne.us", + "lib.nh.us", + "lib.nj.us", + "lib.nm.us", + "lib.nv.us", + "lib.ny.us", + "lib.oh.us", + "lib.ok.us", + "lib.or.us", + "lib.pa.us", + "lib.pr.us", + "lib.ri.us", + "lib.sc.us", + "lib.sd.us", + "lib.tn.us", + "lib.tx.us", + "lib.ut.us", + "lib.vi.us", + "lib.vt.us", + "lib.va.us", + "lib.wa.us", + "lib.wi.us", + "lib.wy.us", + "pvt.k12.ma.us", + "chtr.k12.ma.us", + "paroch.k12.ma.us", + "uy", + "com.uy", + "edu.uy", + "gub.uy", + "mil.uy", + "net.uy", + "org.uy", + "uz", + "co.uz", + "com.uz", + "net.uz", + "org.uz", + "va", + "vc", + "com.vc", + "net.vc", + "org.vc", + "gov.vc", + "mil.vc", + "edu.vc", + "ve", + "arts.ve", + "co.ve", + "com.ve", + "e12.ve", + "edu.ve", + "firm.ve", + "gob.ve", + "gov.ve", + "info.ve", + "int.ve", + "mil.ve", + "net.ve", + "org.ve", + "rec.ve", + "store.ve", + "tec.ve", + "web.ve", + "vg", + "vi", + "co.vi", + "com.vi", + "k12.vi", + "net.vi", + "org.vi", + "vn", + "com.vn", + "net.vn", + "org.vn", + "edu.vn", + "gov.vn", + "int.vn", + "ac.vn", + "biz.vn", + "info.vn", + "name.vn", + "pro.vn", + "health.vn", + "vu", + "com.vu", + "edu.vu", + "net.vu", + "org.vu", + "wf", + "ws", + "com.ws", + "net.ws", + "org.ws", + "gov.ws", + "edu.ws", + "yt", + "xn--mgbaam7a8h", + "xn--y9a3aq", + "xn--54b7fta0cc", + "xn--90ais", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--lgbbat1ad8j", + "xn--wgbh1c", + "xn--e1a4c", + "xn--node", + "xn--qxam", + "xn--j6w193g", + "xn--h2brj9c", + "xn--mgbbh1a71e", + "xn--fpcrj9c3d", + "xn--gecrj9c", + "xn--s9brj9c", + "xn--45brj9c", + "xn--xkc2dl3a5ee0h", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgbtx2b", + "xn--mgbayh7gpa", + "xn--3e0b707e", + "xn--80ao21a", + "xn--fzc2c9e2c", + "xn--xkc2al3hye2a", + "xn--mgbc0a9azcg", + "xn--d1alf", + "xn--l1acc", + "xn--mix891f", + "xn--mix082f", + "xn--mgbx4cd0ab", + "xn--mgb9awbf", + "xn--mgbai9azgqp6j", + "xn--mgbai9a5eva00b", + "xn--ygbi2ammx", + "xn--90a3ac", + "xn--o1ac.xn--90a3ac", + "xn--c1avg.xn--90a3ac", + "xn--90azh.xn--90a3ac", + "xn--d1at.xn--90a3ac", + "xn--o1ach.xn--90a3ac", + "xn--80au.xn--90a3ac", + "xn--p1ai", + "xn--wgbl6a", + "xn--mgberp4a5d4ar", + "xn--mgberp4a5d4a87g", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbpl2fh", + "xn--yfro4i67o", + "xn--clchc0ea0b2g2a9gcd", + "xn--ogbpf8fl", + "xn--mgbtf8fl", + "xn--o3cw4h", + "xn--pgbs0dh", + "xn--kpry57d", + "xn--kprw13d", + "xn--nnx388a", + "xn--j1amh", + "xn--mgb2ddes", + "xxx", + "*.ye", + "ac.za", + "agric.za", + "alt.za", + "co.za", + "edu.za", + "gov.za", + "grondar.za", + "law.za", + "mil.za", + "net.za", + "ngo.za", + "nis.za", + "nom.za", + "org.za", + "school.za", + "tm.za", + "web.za", + "zm", + "ac.zm", + "biz.zm", + "co.zm", + "com.zm", + "edu.zm", + "gov.zm", + "info.zm", + "mil.zm", + "net.zm", + "org.zm", + "sch.zm", + "*.zw", + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "adac", + "ads", + 
"adult", + "aeg", + "aetna", + "afamilycompany", + "afl", + "africa", + "africamagic", + "agakhan", + "agency", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "alfaromeo", + "alibaba", + "alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + "anz", + "aol", + "apartments", + "app", + "apple", + "aquarelle", + "arab", + "aramco", + "archi", + "army", + "art", + "arte", + "asda", + "associates", + "athleta", + "attorney", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aws", + "axa", + "azure", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bharti", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bms", + "bmw", + "bnl", + "bnpparibas", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bzh", + "cab", + "cafe", + "cal", + "call", + "calvinklein", + "cam", + "camera", + "camp", + "cancerresearch", + "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "ceb", + "center", + "ceo", + "cern", + "cfa", + "cfd", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "chloe", + "christmas", + "chrome", + "chrysler", + "church", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "coach", + "codes", + "coffee", + "college", + "cologne", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cuisinella", + "cymru", + "cyou", + "dabur", + "dad", + "dance", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + "dnp", + "docs", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dstv", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dwg", + "earth", + "eat", + "edeka", + "education", + "email", + "emerck", + "emerson", + "energy", 
+ "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "ericsson", + "erni", + "esq", + "estate", + "esurance", + "etisalat", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "flsmidth", + "fly", + "foo", + "food", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "free", + "fresenius", + "frl", + "frogans", + "frontdoor", + "frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gbiz", + "gdn", + "gea", + "gent", + "genting", + "george", + "ggee", + "gift", + "gifts", + "gives", + "giving", + "glade", + "glass", + "gle", + "global", + "globo", + "gmail", + "gmbh", + "gmo", + "gmx", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "gotv", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "group", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", + "help", + "helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hkt", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "host", + "hosting", + "hot", + "hoteles", + "hotels", + "hotmail", + "house", + "how", + "hsbc", + "htc", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "ieee", + "ifm", + "iinet", + "ikano", + "imamat", + "imdb", + "immo", + "immobilien", + "industries", + "infiniti", + "ing", + "ink", + "institute", + "insurance", + "insure", + "intel", + "international", + "intuit", + "investments", + "ipiranga", + "irish", + "iselect", + "ismaili", + "ist", + "istanbul", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jmp", + "jnj", + "joburg", + "jot", + "joy", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kia", + "kim", + "kinder", + "kindle", + "kitchen", + "kiwi", + "koeln", + "komatsu", + "kosher", + "kpmg", + "kpn", + "krd", + "kred", + "kuokgroup", + "kyknet", + "kyoto", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + "like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + "living", + "lixil", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "ltd", + 
"ltda", + "lundbeck", + "lupin", + "luxe", + "luxury", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mcd", + "mcdonalds", + "mckinsey", + "med", + "media", + "meet", + "melbourne", + "meme", + "memorial", + "men", + "menu", + "meo", + "metlife", + "miami", + "microsoft", + "mini", + "mint", + "mit", + "mitsubishi", + "mlb", + "mls", + "mma", + "mnet", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "montblanc", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "msd", + "mtn", + "mtpc", + "mtr", + "multichoice", + "mutual", + "mutuelle", + "mzansimagic", + "nab", + "nadex", + "nagoya", + "naspers", + "nationwide", + "natura", + "navy", + "nba", + "nec", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nfl", + "ngo", + "nhk", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "nra", + "nrw", + "ntt", + "nyc", + "obi", + "observer", + "off", + "office", + "okinawa", + "olayan", + "olayangroup", + "oldnavy", + "ollo", + "omega", + "one", + "ong", + "onl", + "online", + "onyourside", + "ooo", + "open", + "oracle", + "orange", + "organic", + "orientexpress", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "page", + "pamperedchef", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "payu", + "pccw", + "pet", + "pfizer", + "pharmacy", + "philips", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "pramerica", + "praxi", + "press", + "prime", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "pub", + "pwc", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "raid", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rsvp", + "ruhr", + "run", + "rwe", + "ryukyu", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sbi", + "sbs", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "seat", + "secure", + "security", + "seek", + "select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "shangrila", + "sharp", + "shaw", + "shell", + "shia", + "shiksha", + "shoes", + "shop", + "shopping", + "shouji", + "show", + "showtime", + "shriram", + "silk", + "sina", + "singles", + "site", + "ski", + "skin", + "sky", + "skype", + "sling", + 
"smart", + "smile", + "sncf", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "srl", + "srt", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "sucks", + "supersport", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "swatch", + "swiftcover", + "swiss", + "sydney", + "symantec", + "systems", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tci", + "tdk", + "team", + "tech", + "technology", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "thd", + "theater", + "theatre", + "theguardian", + "tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tjmaxx", + "tjx", + "tkmaxx", + "tmall", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "trade", + "trading", + "training", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tube", + "tui", + "tunes", + "tushu", + "tvs", + "ubank", + "ubs", + "uconnect", + "unicom", + "university", + "uno", + "uol", + "ups", + "vacations", + "vana", + "vanguard", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + "voyage", + "vuelos", + "wales", + "walmart", + "walter", + "wang", + "wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45q11c", + "xn--4gbrim", + "xn--4gq48lf9j", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fjq720a", + "xn--flw351e", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gk3at1e", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kpu716f", + "xn--kput3i", + "xn--mgba3a3ejt", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbab2bd", + "xn--mgbb9fbpob", + "xn--mgbca7dzdo", + "xn--mgbi4ecexp", + "xn--mgbt3dhd", + "xn--mk1bu44c", + "xn--mxtq1m", + "xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--p1acf", + 
"xn--pbt977c", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--rhqv96g", + "xn--rovu88b", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + "xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--xhq521b", + "xn--zfr164b", + "xperia", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yun", + "zappos", + "zara", + "zero", + "zip", + "zippo", + "zone", + "zuerich", + "beep.pl", + "*.compute.estate", + "*.alces.network", + "cloudfront.net", + "compute.amazonaws.com", + "ap-northeast-1.compute.amazonaws.com", + "ap-northeast-2.compute.amazonaws.com", + "ap-southeast-1.compute.amazonaws.com", + "ap-southeast-2.compute.amazonaws.com", + "eu-central-1.compute.amazonaws.com", + "eu-west-1.compute.amazonaws.com", + "sa-east-1.compute.amazonaws.com", + "us-gov-west-1.compute.amazonaws.com", + "us-west-1.compute.amazonaws.com", + "us-west-2.compute.amazonaws.com", + "us-east-1.amazonaws.com", + "compute-1.amazonaws.com", + "z-1.compute-1.amazonaws.com", + "z-2.compute-1.amazonaws.com", + "compute.amazonaws.com.cn", + "cn-north-1.compute.amazonaws.com.cn", + "elasticbeanstalk.com", + "elb.amazonaws.com", + "s3.amazonaws.com", + "s3-ap-northeast-1.amazonaws.com", + "s3-ap-northeast-2.amazonaws.com", + "s3-ap-southeast-1.amazonaws.com", + "s3-ap-southeast-2.amazonaws.com", + "s3-eu-central-1.amazonaws.com", + "s3-eu-west-1.amazonaws.com", + "s3-external-1.amazonaws.com", + "s3-external-2.amazonaws.com", + "s3-fips-us-gov-west-1.amazonaws.com", + "s3-sa-east-1.amazonaws.com", + "s3-us-gov-west-1.amazonaws.com", + "s3-us-west-1.amazonaws.com", + "s3-us-west-2.amazonaws.com", + "s3.ap-northeast-2.amazonaws.com", + "s3.cn-north-1.amazonaws.com.cn", + "s3.eu-central-1.amazonaws.com", + "on-aptible.com", + "potager.org", + "poivron.org", + "sweetpepper.org", + "pimienta.org", + "myfritz.net", + "betainabox.com", + "boxfuse.io", + "mycd.eu", + "ae.org", + "ar.com", + "br.com", + "cn.com", + "com.de", + "com.se", + "de.com", + "eu.com", + "gb.com", + "gb.net", + "hu.com", + "hu.net", + "jp.net", + "jpn.com", + "kr.com", + "mex.com", + "no.com", + "qc.com", + "ru.com", + "sa.com", + "se.com", + "se.net", + "uk.com", + "uk.net", + "us.com", + "uy.com", + "za.bz", + "za.com", + "africa.com", + "gr.com", + "in.net", + "us.org", + "co.com", + "c.la", + "certmgr.org", + "xenapponazure.com", + "virtueeldomein.nl", + "cloudcontrolled.com", + "cloudcontrolapp.com", + "co.ca", + "co.cz", + "c.cdn77.org", + "cdn77-ssl.net", + "r.cdn77.net", + "rsc.cdn77.org", + "ssl.origin.cdn77-secure.org", + "co.nl", + "co.no", + "*.platform.sh", + "realm.cz", + "*.cryptonomic.net", + "cupcake.is", + "cyon.link", + "cyon.site", + "daplie.me", + "biz.dk", + "co.dk", + "firm.dk", + "reg.dk", + "store.dk", + "dedyn.io", + "dnshome.de", + "dreamhosters.com", + "mydrobo.com", + "duckdns.org", + "dy.fi", + "tunk.org", + "dyndns-at-home.com", + "dyndns-at-work.com", + "dyndns-blog.com", + "dyndns-free.com", + "dyndns-home.com", + "dyndns-ip.com", + "dyndns-mail.com", + "dyndns-office.com", + "dyndns-pics.com", + "dyndns-remote.com", + "dyndns-server.com", + "dyndns-web.com", + "dyndns-wiki.com", + "dyndns-work.com", + "dyndns.biz", + "dyndns.info", + "dyndns.org", + "dyndns.tv", + "at-band-camp.net", + "ath.cx", + "barrel-of-knowledge.info", + "barrell-of-knowledge.info", + "better-than.tv", + "blogdns.com", + "blogdns.net", + "blogdns.org", + 
"blogsite.org", + "boldlygoingnowhere.org", + "broke-it.net", + "buyshouses.net", + "cechire.com", + "dnsalias.com", + "dnsalias.net", + "dnsalias.org", + "dnsdojo.com", + "dnsdojo.net", + "dnsdojo.org", + "does-it.net", + "doesntexist.com", + "doesntexist.org", + "dontexist.com", + "dontexist.net", + "dontexist.org", + "doomdns.com", + "doomdns.org", + "dvrdns.org", + "dyn-o-saur.com", + "dynalias.com", + "dynalias.net", + "dynalias.org", + "dynathome.net", + "dyndns.ws", + "endofinternet.net", + "endofinternet.org", + "endoftheinternet.org", + "est-a-la-maison.com", + "est-a-la-masion.com", + "est-le-patron.com", + "est-mon-blogueur.com", + "for-better.biz", + "for-more.biz", + "for-our.info", + "for-some.biz", + "for-the.biz", + "forgot.her.name", + "forgot.his.name", + "from-ak.com", + "from-al.com", + "from-ar.com", + "from-az.net", + "from-ca.com", + "from-co.net", + "from-ct.com", + "from-dc.com", + "from-de.com", + "from-fl.com", + "from-ga.com", + "from-hi.com", + "from-ia.com", + "from-id.com", + "from-il.com", + "from-in.com", + "from-ks.com", + "from-ky.com", + "from-la.net", + "from-ma.com", + "from-md.com", + "from-me.org", + "from-mi.com", + "from-mn.com", + "from-mo.com", + "from-ms.com", + "from-mt.com", + "from-nc.com", + "from-nd.com", + "from-ne.com", + "from-nh.com", + "from-nj.com", + "from-nm.com", + "from-nv.com", + "from-ny.net", + "from-oh.com", + "from-ok.com", + "from-or.com", + "from-pa.com", + "from-pr.com", + "from-ri.com", + "from-sc.com", + "from-sd.com", + "from-tn.com", + "from-tx.com", + "from-ut.com", + "from-va.com", + "from-vt.com", + "from-wa.com", + "from-wi.com", + "from-wv.com", + "from-wy.com", + "ftpaccess.cc", + "fuettertdasnetz.de", + "game-host.org", + "game-server.cc", + "getmyip.com", + "gets-it.net", + "go.dyndns.org", + "gotdns.com", + "gotdns.org", + "groks-the.info", + "groks-this.info", + "ham-radio-op.net", + "here-for-more.info", + "hobby-site.com", + "hobby-site.org", + "home.dyndns.org", + "homedns.org", + "homeftp.net", + "homeftp.org", + "homeip.net", + "homelinux.com", + "homelinux.net", + "homelinux.org", + "homeunix.com", + "homeunix.net", + "homeunix.org", + "iamallama.com", + "in-the-band.net", + "is-a-anarchist.com", + "is-a-blogger.com", + "is-a-bookkeeper.com", + "is-a-bruinsfan.org", + "is-a-bulls-fan.com", + "is-a-candidate.org", + "is-a-caterer.com", + "is-a-celticsfan.org", + "is-a-chef.com", + "is-a-chef.net", + "is-a-chef.org", + "is-a-conservative.com", + "is-a-cpa.com", + "is-a-cubicle-slave.com", + "is-a-democrat.com", + "is-a-designer.com", + "is-a-doctor.com", + "is-a-financialadvisor.com", + "is-a-geek.com", + "is-a-geek.net", + "is-a-geek.org", + "is-a-green.com", + "is-a-guru.com", + "is-a-hard-worker.com", + "is-a-hunter.com", + "is-a-knight.org", + "is-a-landscaper.com", + "is-a-lawyer.com", + "is-a-liberal.com", + "is-a-libertarian.com", + "is-a-linux-user.org", + "is-a-llama.com", + "is-a-musician.com", + "is-a-nascarfan.com", + "is-a-nurse.com", + "is-a-painter.com", + "is-a-patsfan.org", + "is-a-personaltrainer.com", + "is-a-photographer.com", + "is-a-player.com", + "is-a-republican.com", + "is-a-rockstar.com", + "is-a-socialist.com", + "is-a-soxfan.org", + "is-a-student.com", + "is-a-teacher.com", + "is-a-techie.com", + "is-a-therapist.com", + "is-an-accountant.com", + "is-an-actor.com", + "is-an-actress.com", + "is-an-anarchist.com", + "is-an-artist.com", + "is-an-engineer.com", + "is-an-entertainer.com", + "is-by.us", + "is-certified.com", + "is-found.org", + "is-gone.com", + "is-into-anime.com", + 
"is-into-cars.com", + "is-into-cartoons.com", + "is-into-games.com", + "is-leet.com", + "is-lost.org", + "is-not-certified.com", + "is-saved.org", + "is-slick.com", + "is-uberleet.com", + "is-very-bad.org", + "is-very-evil.org", + "is-very-good.org", + "is-very-nice.org", + "is-very-sweet.org", + "is-with-theband.com", + "isa-geek.com", + "isa-geek.net", + "isa-geek.org", + "isa-hockeynut.com", + "issmarterthanyou.com", + "isteingeek.de", + "istmein.de", + "kicks-ass.net", + "kicks-ass.org", + "knowsitall.info", + "land-4-sale.us", + "lebtimnetz.de", + "leitungsen.de", + "likes-pie.com", + "likescandy.com", + "merseine.nu", + "mine.nu", + "misconfused.org", + "mypets.ws", + "myphotos.cc", + "neat-url.com", + "office-on-the.net", + "on-the-web.tv", + "podzone.net", + "podzone.org", + "readmyblog.org", + "saves-the-whales.com", + "scrapper-site.net", + "scrapping.cc", + "selfip.biz", + "selfip.com", + "selfip.info", + "selfip.net", + "selfip.org", + "sells-for-less.com", + "sells-for-u.com", + "sells-it.net", + "sellsyourhome.org", + "servebbs.com", + "servebbs.net", + "servebbs.org", + "serveftp.net", + "serveftp.org", + "servegame.org", + "shacknet.nu", + "simple-url.com", + "space-to-rent.com", + "stuff-4-sale.org", + "stuff-4-sale.us", + "teaches-yoga.com", + "thruhere.net", + "traeumtgerade.de", + "webhop.biz", + "webhop.info", + "webhop.net", + "webhop.org", + "worse-than.tv", + "writesthisblog.com", + "dynv6.net", + "e4.cz", + "eu.org", + "al.eu.org", + "asso.eu.org", + "at.eu.org", + "au.eu.org", + "be.eu.org", + "bg.eu.org", + "ca.eu.org", + "cd.eu.org", + "ch.eu.org", + "cn.eu.org", + "cy.eu.org", + "cz.eu.org", + "de.eu.org", + "dk.eu.org", + "edu.eu.org", + "ee.eu.org", + "es.eu.org", + "fi.eu.org", + "fr.eu.org", + "gr.eu.org", + "hr.eu.org", + "hu.eu.org", + "ie.eu.org", + "il.eu.org", + "in.eu.org", + "int.eu.org", + "is.eu.org", + "it.eu.org", + "jp.eu.org", + "kr.eu.org", + "lt.eu.org", + "lu.eu.org", + "lv.eu.org", + "mc.eu.org", + "me.eu.org", + "mk.eu.org", + "mt.eu.org", + "my.eu.org", + "net.eu.org", + "ng.eu.org", + "nl.eu.org", + "no.eu.org", + "nz.eu.org", + "paris.eu.org", + "pl.eu.org", + "pt.eu.org", + "q-a.eu.org", + "ro.eu.org", + "ru.eu.org", + "se.eu.org", + "si.eu.org", + "sk.eu.org", + "tr.eu.org", + "uk.eu.org", + "us.eu.org", + "eu-1.evennode.com", + "eu-2.evennode.com", + "us-1.evennode.com", + "us-2.evennode.com", + "apps.fbsbx.com", + "a.ssl.fastly.net", + "b.ssl.fastly.net", + "global.ssl.fastly.net", + "a.prod.fastly.net", + "global.prod.fastly.net", + "fhapp.xyz", + "firebaseapp.com", + "flynnhub.com", + "freebox-os.com", + "freeboxos.com", + "fbx-os.fr", + "fbxos.fr", + "freebox-os.fr", + "freeboxos.fr", + "service.gov.uk", + "github.io", + "githubusercontent.com", + "githubcloud.com", + "*.api.githubcloud.com", + "*.ext.githubcloud.com", + "gist.githubcloud.com", + "*.githubcloudusercontent.com", + "ro.com", + "goip.de", + "*.0emm.com", + "appspot.com", + "blogspot.ae", + "blogspot.al", + "blogspot.am", + "blogspot.ba", + "blogspot.be", + "blogspot.bg", + "blogspot.bj", + "blogspot.ca", + "blogspot.cf", + "blogspot.ch", + "blogspot.cl", + "blogspot.co.at", + "blogspot.co.id", + "blogspot.co.il", + "blogspot.co.ke", + "blogspot.co.nz", + "blogspot.co.uk", + "blogspot.co.za", + "blogspot.com", + "blogspot.com.ar", + "blogspot.com.au", + "blogspot.com.br", + "blogspot.com.by", + "blogspot.com.co", + "blogspot.com.cy", + "blogspot.com.ee", + "blogspot.com.eg", + "blogspot.com.es", + "blogspot.com.mt", + "blogspot.com.ng", + "blogspot.com.tr", + 
"blogspot.com.uy", + "blogspot.cv", + "blogspot.cz", + "blogspot.de", + "blogspot.dk", + "blogspot.fi", + "blogspot.fr", + "blogspot.gr", + "blogspot.hk", + "blogspot.hr", + "blogspot.hu", + "blogspot.ie", + "blogspot.in", + "blogspot.is", + "blogspot.it", + "blogspot.jp", + "blogspot.kr", + "blogspot.li", + "blogspot.lt", + "blogspot.lu", + "blogspot.md", + "blogspot.mk", + "blogspot.mr", + "blogspot.mx", + "blogspot.my", + "blogspot.nl", + "blogspot.no", + "blogspot.pe", + "blogspot.pt", + "blogspot.qa", + "blogspot.re", + "blogspot.ro", + "blogspot.rs", + "blogspot.ru", + "blogspot.se", + "blogspot.sg", + "blogspot.si", + "blogspot.sk", + "blogspot.sn", + "blogspot.td", + "blogspot.tw", + "blogspot.ug", + "blogspot.vn", + "cloudfunctions.net", + "codespot.com", + "googleapis.com", + "googlecode.com", + "pagespeedmobilizer.com", + "withgoogle.com", + "withyoutube.com", + "hashbang.sh", + "hasura-app.io", + "hepforge.org", + "herokuapp.com", + "herokussl.com", + "iki.fi", + "biz.at", + "info.at", + "*.magentosite.cloud", + "meteorapp.com", + "eu.meteorapp.com", + "co.pl", + "azurewebsites.net", + "azure-mobile.net", + "cloudapp.net", + "bmoattachments.org", + "4u.com", + "ngrok.io", + "nfshost.com", + "nsupdate.info", + "nerdpol.ovh", + "blogsyte.com", + "brasilia.me", + "cable-modem.org", + "ciscofreak.com", + "collegefan.org", + "couchpotatofries.org", + "damnserver.com", + "ddns.me", + "ditchyourip.com", + "dnsfor.me", + "dnsiskinky.com", + "dvrcam.info", + "dynns.com", + "eating-organic.net", + "fantasyleague.cc", + "geekgalaxy.com", + "golffan.us", + "health-carereform.com", + "homesecuritymac.com", + "homesecuritypc.com", + "hopto.me", + "ilovecollege.info", + "loginto.me", + "mlbfan.org", + "mmafan.biz", + "myactivedirectory.com", + "mydissent.net", + "myeffect.net", + "mymediapc.net", + "mypsx.net", + "mysecuritycamera.com", + "mysecuritycamera.net", + "mysecuritycamera.org", + "net-freaks.com", + "nflfan.org", + "nhlfan.net", + "no-ip.ca", + "no-ip.co.uk", + "no-ip.net", + "noip.us", + "onthewifi.com", + "pgafan.net", + "point2this.com", + "pointto.us", + "privatizehealthinsurance.net", + "quicksytes.com", + "read-books.org", + "securitytactics.com", + "serveexchange.com", + "servehumour.com", + "servep2p.com", + "servesarcasm.com", + "stufftoread.com", + "ufcfan.org", + "unusualperson.com", + "workisboring.com", + "3utilities.com", + "bounceme.net", + "ddns.net", + "ddnsking.com", + "gotdns.ch", + "hopto.org", + "myftp.biz", + "myftp.org", + "myvnc.com", + "no-ip.biz", + "no-ip.info", + "no-ip.org", + "noip.me", + "redirectme.net", + "servebeer.com", + "serveblog.net", + "servecounterstrike.com", + "serveftp.com", + "servegame.com", + "servehalflife.com", + "servehttp.com", + "serveirc.com", + "serveminecraft.net", + "servemp3.com", + "servepics.com", + "servequake.com", + "sytes.net", + "webhop.me", + "zapto.org", + "nyc.mn", + "nid.io", + "operaunite.com", + "outsystemscloud.com", + "ownprovider.com", + "oy.lc", + "pgfog.com", + "pagefrontapp.com", + "art.pl", + "gliwice.pl", + "krakow.pl", + "poznan.pl", + "wroc.pl", + "zakopane.pl", + "pantheonsite.io", + "gotpantheon.com", + "mypep.link", + "xen.prgmr.com", + "priv.at", + "chirurgiens-dentistes-en-france.fr", + "qa2.com", + "rackmaze.com", + "rackmaze.net", + "rhcloud.com", + "hzc.io", + "sandcats.io", + "logoip.de", + "logoip.com", + "biz.ua", + "co.ua", + "pp.ua", + "myshopblocks.com", + "sinaapp.com", + "vipsinaapp.com", + "1kapp.com", + "bounty-full.com", + "alpha.bounty-full.com", + "beta.bounty-full.com", + 
"spacekit.io", + "diskstation.me", + "dscloud.biz", + "dscloud.me", + "dscloud.mobi", + "dsmynas.com", + "dsmynas.net", + "dsmynas.org", + "familyds.com", + "familyds.net", + "familyds.org", + "i234.me", + "myds.me", + "synology.me", + "gda.pl", + "gdansk.pl", + "gdynia.pl", + "med.pl", + "sopot.pl", + "bloxcms.com", + "townnews-staging.com", + "tuxfamily.org", + "hk.com", + "hk.org", + "ltd.hk", + "inc.hk", + "router.management", + "yolasite.com", + "za.net", + "za.org", +} + +var nodeLabels = [...]string{ + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "ac", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "ad", + "adac", + "ads", + "adult", + "ae", + "aeg", + "aero", + "aetna", + "af", + "afamilycompany", + "afl", + "africa", + "africamagic", + "ag", + "agakhan", + "agency", + "ai", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "al", + "alfaromeo", + "alibaba", + "alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "am", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + "anz", + "ao", + "aol", + "apartments", + "app", + "apple", + "aq", + "aquarelle", + "ar", + "arab", + "aramco", + "archi", + "army", + "arpa", + "art", + "arte", + "as", + "asda", + "asia", + "associates", + "at", + "athleta", + "attorney", + "au", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aw", + "aws", + "ax", + "axa", + "az", + "azure", + "ba", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bb", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "bd", + "be", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bf", + "bg", + "bh", + "bharti", + "bi", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "biz", + "bj", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bm", + "bms", + "bmw", + "bn", + "bnl", + "bnpparibas", + "bo", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "br", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "bs", + "bt", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bv", + "bw", + "by", + "bz", + "bzh", + "ca", + "cab", + "cafe", + "cal", + "call", + "calvinklein", + "cam", + "camera", + "camp", + "cancerresearch", + "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "cat", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "cc", + "cd", + "ceb", + "center", + "ceo", + "cern", + "cf", + "cfa", + "cfd", + "cg", + "ch", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "chloe", + "christmas", + "chrome", + "chrysler", + "church", + "ci", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "ck", + "cl", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "cm", + 
"cn", + "co", + "coach", + "codes", + "coffee", + "college", + "cologne", + "com", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "coop", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "cr", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cu", + "cuisinella", + "cv", + "cw", + "cx", + "cy", + "cymru", + "cyou", + "cz", + "dabur", + "dad", + "dance", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "de", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + "dj", + "dk", + "dm", + "dnp", + "do", + "docs", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dstv", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dwg", + "dz", + "earth", + "eat", + "ec", + "edeka", + "edu", + "education", + "ee", + "eg", + "email", + "emerck", + "emerson", + "energy", + "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "er", + "ericsson", + "erni", + "es", + "esq", + "estate", + "esurance", + "et", + "etisalat", + "eu", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fi", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "fj", + "fk", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "flsmidth", + "fly", + "fm", + "fo", + "foo", + "food", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "fr", + "free", + "fresenius", + "frl", + "frogans", + "frontdoor", + "frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "ga", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gb", + "gbiz", + "gd", + "gdn", + "ge", + "gea", + "gent", + "genting", + "george", + "gf", + "gg", + "ggee", + "gh", + "gi", + "gift", + "gifts", + "gives", + "giving", + "gl", + "glade", + "glass", + "gle", + "global", + "globo", + "gm", + "gmail", + "gmbh", + "gmo", + "gmx", + "gn", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "gotv", + "gov", + "gp", + "gq", + "gr", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "group", + "gs", + "gt", + "gu", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "gw", + "gy", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", + "help", + "helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hk", + "hkt", + "hm", + "hn", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "host", + "hosting", + "hot", + 
"hoteles", + "hotels", + "hotmail", + "house", + "how", + "hr", + "hsbc", + "ht", + "htc", + "hu", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "id", + "ie", + "ieee", + "ifm", + "iinet", + "ikano", + "il", + "im", + "imamat", + "imdb", + "immo", + "immobilien", + "in", + "industries", + "infiniti", + "info", + "ing", + "ink", + "institute", + "insurance", + "insure", + "int", + "intel", + "international", + "intuit", + "investments", + "io", + "ipiranga", + "iq", + "ir", + "irish", + "is", + "iselect", + "ismaili", + "ist", + "istanbul", + "it", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "je", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jm", + "jmp", + "jnj", + "jo", + "jobs", + "joburg", + "jot", + "joy", + "jp", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "ke", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kg", + "kh", + "ki", + "kia", + "kim", + "kinder", + "kindle", + "kitchen", + "kiwi", + "km", + "kn", + "koeln", + "komatsu", + "kosher", + "kp", + "kpmg", + "kpn", + "kr", + "krd", + "kred", + "kuokgroup", + "kw", + "ky", + "kyknet", + "kyoto", + "kz", + "la", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lb", + "lc", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "li", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + "like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + "living", + "lixil", + "lk", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "lr", + "ls", + "lt", + "ltd", + "ltda", + "lu", + "lundbeck", + "lupin", + "luxe", + "luxury", + "lv", + "ly", + "ma", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mc", + "mcd", + "mcdonalds", + "mckinsey", + "md", + "me", + "med", + "media", + "meet", + "melbourne", + "meme", + "memorial", + "men", + "menu", + "meo", + "metlife", + "mg", + "mh", + "miami", + "microsoft", + "mil", + "mini", + "mint", + "mit", + "mitsubishi", + "mk", + "ml", + "mlb", + "mls", + "mm", + "mma", + "mn", + "mnet", + "mo", + "mobi", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "montblanc", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "mp", + "mq", + "mr", + "ms", + "msd", + "mt", + "mtn", + "mtpc", + "mtr", + "mu", + "multichoice", + "museum", + "mutual", + "mutuelle", + "mv", + "mw", + "mx", + "my", + "mz", + "mzansimagic", + "na", + "nab", + "nadex", + "nagoya", + "name", + "naspers", + "nationwide", + "natura", + "navy", + "nba", + "nc", + "ne", + "nec", + "net", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nf", + "nfl", + "ng", + "ngo", + "nhk", + "ni", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nl", + "no", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "np", + "nr", + "nra", + "nrw", + "ntt", + "nu", + "nyc", + "nz", + "obi", + "observer", + "off", + "office", 
+ "okinawa", + "olayan", + "olayangroup", + "oldnavy", + "ollo", + "om", + "omega", + "one", + "ong", + "onl", + "online", + "onyourside", + "ooo", + "open", + "oracle", + "orange", + "org", + "organic", + "orientexpress", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "pa", + "page", + "pamperedchef", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "payu", + "pccw", + "pe", + "pet", + "pf", + "pfizer", + "pg", + "ph", + "pharmacy", + "philips", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "pk", + "pl", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pm", + "pn", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "post", + "pr", + "pramerica", + "praxi", + "press", + "prime", + "pro", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "ps", + "pt", + "pub", + "pw", + "pwc", + "py", + "qa", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "raid", + "re", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "ro", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rs", + "rsvp", + "ru", + "ruhr", + "run", + "rw", + "rwe", + "ryukyu", + "sa", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sb", + "sbi", + "sbs", + "sc", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "sd", + "se", + "seat", + "secure", + "security", + "seek", + "select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "sg", + "sh", + "shangrila", + "sharp", + "shaw", + "shell", + "shia", + "shiksha", + "shoes", + "shop", + "shopping", + "shouji", + "show", + "showtime", + "shriram", + "si", + "silk", + "sina", + "singles", + "site", + "sj", + "sk", + "ski", + "skin", + "sky", + "skype", + "sl", + "sling", + "sm", + "smart", + "smile", + "sn", + "sncf", + "so", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "sr", + "srl", + "srt", + "st", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "su", + "sucks", + "supersport", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "sv", + "swatch", + "swiftcover", + "swiss", + "sx", + "sy", + "sydney", + "symantec", + "systems", + "sz", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tc", + "tci", + "td", + "tdk", + "team", + "tech", + "technology", + "tel", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "tf", + "tg", + "th", + "thd", + "theater", + "theatre", + "theguardian", + 
"tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tj", + "tjmaxx", + "tjx", + "tk", + "tkmaxx", + "tl", + "tm", + "tmall", + "tn", + "to", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "tr", + "trade", + "trading", + "training", + "travel", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tt", + "tube", + "tui", + "tunes", + "tushu", + "tv", + "tvs", + "tw", + "tz", + "ua", + "ubank", + "ubs", + "uconnect", + "ug", + "uk", + "unicom", + "university", + "uno", + "uol", + "ups", + "us", + "uy", + "uz", + "va", + "vacations", + "vana", + "vanguard", + "vc", + "ve", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "vg", + "vi", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vn", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + "voyage", + "vu", + "vuelos", + "wales", + "walmart", + "walter", + "wang", + "wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "wf", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "ws", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3e0b707e", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45brj9c", + "xn--45q11c", + "xn--4gbrim", + "xn--4gq48lf9j", + "xn--54b7fta0cc", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80ao21a", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--90a3ac", + "xn--90ais", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--clchc0ea0b2g2a9gcd", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--d1alf", + "xn--e1a4c", + "xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--fjq720a", + "xn--flw351e", + "xn--fpcrj9c3d", + "xn--fzc2c9e2c", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gecrj9c", + "xn--gk3at1e", + "xn--h2brj9c", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--j1amh", + "xn--j6w193g", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kprw13d", + "xn--kpry57d", + "xn--kpu716f", + "xn--kput3i", + "xn--l1acc", + "xn--lgbbat1ad8j", + "xn--mgb2ddes", + "xn--mgb9awbf", + "xn--mgba3a3ejt", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbaam7a8h", + "xn--mgbab2bd", + "xn--mgbai9a5eva00b", + "xn--mgbai9azgqp6j", + "xn--mgbayh7gpa", + "xn--mgbb9fbpob", + "xn--mgbbh1a71e", + "xn--mgbc0a9azcg", + "xn--mgbca7dzdo", + "xn--mgberp4a5d4a87g", + "xn--mgberp4a5d4ar", + "xn--mgbi4ecexp", + "xn--mgbpl2fh", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbt3dhd", + "xn--mgbtf8fl", + "xn--mgbtx2b", + "xn--mgbx4cd0ab", + "xn--mix082f", + "xn--mix891f", + "xn--mk1bu44c", + "xn--mxtq1m", + 
"xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nnx388a", + "xn--node", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--o3cw4h", + "xn--ogbpf8fl", + "xn--p1acf", + "xn--p1ai", + "xn--pbt977c", + "xn--pgbs0dh", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--qxam", + "xn--rhqv96g", + "xn--rovu88b", + "xn--s9brj9c", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + "xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--wgbh1c", + "xn--wgbl6a", + "xn--xhq521b", + "xn--xkc2al3hye2a", + "xn--xkc2dl3a5ee0h", + "xn--y9a3aq", + "xn--yfro4i67o", + "xn--ygbi2ammx", + "xn--zfr164b", + "xperia", + "xxx", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "ye", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yt", + "yun", + "za", + "zappos", + "zara", + "zero", + "zip", + "zippo", + "zm", + "zone", + "zuerich", + "zw", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "nom", + "ac", + "blogspot", + "co", + "gov", + "mil", + "net", + "org", + "sch", + "accident-investigation", + "accident-prevention", + "aerobatic", + "aeroclub", + "aerodrome", + "agents", + "air-surveillance", + "air-traffic-control", + "aircraft", + "airline", + "airport", + "airtraffic", + "ambulance", + "amusement", + "association", + "author", + "ballooning", + "broker", + "caa", + "cargo", + "catering", + "certification", + "championship", + "charter", + "civilaviation", + "club", + "conference", + "consultant", + "consulting", + "control", + "council", + "crew", + "design", + "dgca", + "educator", + "emergency", + "engine", + "engineer", + "entertainment", + "equipment", + "exchange", + "express", + "federation", + "flight", + "freight", + "fuel", + "gliding", + "government", + "groundhandling", + "group", + "hanggliding", + "homebuilt", + "insurance", + "journal", + "journalist", + "leasing", + "logistics", + "magazine", + "maintenance", + "media", + "microlight", + "modelling", + "navigation", + "parachuting", + "paragliding", + "passenger-association", + "pilot", + "press", + "production", + "recreation", + "repbody", + "res", + "research", + "rotorcraft", + "safety", + "scientist", + "services", + "show", + "skydiving", + "software", + "student", + "trader", + "trading", + "trainer", + "union", + "workinggroup", + "works", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "net", + "nom", + "org", + "com", + "net", + "off", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "blogspot", + "co", + "ed", + "gv", + "it", + "og", + "pb", + "com", + "edu", + "gob", + "gov", + "int", + "mil", + "net", + "org", + "tur", + "blogspot", + "e164", + "in-addr", + "ip6", + "iris", + "uri", + "urn", + "gov", + "ac", + "biz", + "co", + "gv", + "info", + "or", + "priv", + "blogspot", + "act", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "info", + "net", + "nsw", + "nt", + "org", + "oz", + "qld", + "sa", + "tas", + "vic", + "wa", + "blogspot", + "act", + "nsw", + "nt", + "qld", + "sa", + "tas", + "vic", + "wa", + "qld", + "sa", + "tas", + "vic", + "wa", + "com", + "biz", + "com", + "edu", + "gov", + "info", + "int", + "mil", + "name", + "net", + "org", + "pp", + "pro", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "net", + "org", + "store", + "tv", + "ac", + "blogspot", + "gov", + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + 
"8", + "9", + "a", + "b", + "blogspot", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "or", + "org", + "dscloud", + "dyndns", + "for-better", + "for-more", + "for-some", + "for-the", + "mmafan", + "myftp", + "no-ip", + "selfip", + "webhop", + "asso", + "barreau", + "blogspot", + "gouv", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gob", + "gov", + "int", + "mil", + "net", + "org", + "tv", + "adm", + "adv", + "agr", + "am", + "arq", + "art", + "ato", + "b", + "bio", + "blog", + "bmd", + "cim", + "cng", + "cnt", + "com", + "coop", + "ecn", + "eco", + "edu", + "emp", + "eng", + "esp", + "etc", + "eti", + "far", + "flog", + "fm", + "fnd", + "fot", + "fst", + "g12", + "ggf", + "gov", + "imb", + "ind", + "inf", + "jor", + "jus", + "leg", + "lel", + "mat", + "med", + "mil", + "mp", + "mus", + "net", + "nom", + "not", + "ntr", + "odo", + "org", + "ppg", + "pro", + "psc", + "psi", + "qsl", + "radio", + "rec", + "slg", + "srv", + "taxi", + "teo", + "tmp", + "trd", + "tur", + "tv", + "vet", + "vlog", + "wiki", + "zlg", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "com", + "gov", + "mil", + "of", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "za", + "ab", + "bc", + "blogspot", + "co", + "gc", + "mb", + "nb", + "nf", + "nl", + "no-ip", + "ns", + "nt", + "nu", + "on", + "pe", + "qc", + "sk", + "yk", + "fantasyleague", + "ftpaccess", + "game-server", + "myphotos", + "scrapping", + "gov", + "blogspot", + "blogspot", + "gotdns", + "ac", + "asso", + "co", + "com", + "ed", + "edu", + "go", + "gouv", + "int", + "md", + "net", + "or", + "org", + "presse", + "xn--aroport-bya", + "www", + "blogspot", + "co", + "gob", + "gov", + "mil", + "magentosite", + "co", + "com", + "gov", + "net", + "ac", + "ah", + "bj", + "com", + "cq", + "edu", + "fj", + "gd", + "gov", + "gs", + "gx", + "gz", + "ha", + "hb", + "he", + "hi", + "hk", + "hl", + "hn", + "jl", + "js", + "jx", + "ln", + "mil", + "mo", + "net", + "nm", + "nx", + "org", + "qh", + "sc", + "sd", + "sh", + "sn", + "sx", + "tj", + "tw", + "xj", + "xn--55qx5d", + "xn--io0a7i", + "xn--od0alg", + "xz", + "yn", + "zj", + "amazonaws", + "cn-north-1", + "compute", + "s3", + "cn-north-1", + "arts", + "com", + "edu", + "firm", + "gov", + "info", + "int", + "mil", + "net", + "nom", + "org", + "rec", + "web", + "blogspot", + "0emm", + "1kapp", + "3utilities", + "4u", + "africa", + "amazonaws", + "appspot", + "ar", + "betainabox", + "blogdns", + "blogspot", + "blogsyte", + "bloxcms", + "bounty-full", + "br", + "cechire", + "ciscofreak", + "cloudcontrolapp", + "cloudcontrolled", + "cn", + "co", + "codespot", + "damnserver", + "ddnsking", + "de", + "ditchyourip", + "dnsalias", + "dnsdojo", + "dnsiskinky", + "doesntexist", + "dontexist", + "doomdns", + "dreamhosters", + "dsmynas", + "dyn-o-saur", + "dynalias", + "dyndns-at-home", + "dyndns-at-work", + "dyndns-blog", + "dyndns-free", + "dyndns-home", + "dyndns-ip", + "dyndns-mail", + "dyndns-office", + "dyndns-pics", + "dyndns-remote", + "dyndns-server", + "dyndns-web", + "dyndns-wiki", + "dyndns-work", + "dynns", + "elasticbeanstalk", + "est-a-la-maison", + "est-a-la-masion", + "est-le-patron", + "est-mon-blogueur", + "eu", + "evennode", + "familyds", + "fbsbx", + "firebaseapp", + "flynnhub", + "freebox-os", + "freeboxos", + 
"from-ak", + "from-al", + "from-ar", + "from-ca", + "from-ct", + "from-dc", + "from-de", + "from-fl", + "from-ga", + "from-hi", + "from-ia", + "from-id", + "from-il", + "from-in", + "from-ks", + "from-ky", + "from-ma", + "from-md", + "from-mi", + "from-mn", + "from-mo", + "from-ms", + "from-mt", + "from-nc", + "from-nd", + "from-ne", + "from-nh", + "from-nj", + "from-nm", + "from-nv", + "from-oh", + "from-ok", + "from-or", + "from-pa", + "from-pr", + "from-ri", + "from-sc", + "from-sd", + "from-tn", + "from-tx", + "from-ut", + "from-va", + "from-vt", + "from-wa", + "from-wi", + "from-wv", + "from-wy", + "gb", + "geekgalaxy", + "getmyip", + "githubcloud", + "githubcloudusercontent", + "githubusercontent", + "googleapis", + "googlecode", + "gotdns", + "gotpantheon", + "gr", + "health-carereform", + "herokuapp", + "herokussl", + "hk", + "hobby-site", + "homelinux", + "homesecuritymac", + "homesecuritypc", + "homeunix", + "hu", + "iamallama", + "is-a-anarchist", + "is-a-blogger", + "is-a-bookkeeper", + "is-a-bulls-fan", + "is-a-caterer", + "is-a-chef", + "is-a-conservative", + "is-a-cpa", + "is-a-cubicle-slave", + "is-a-democrat", + "is-a-designer", + "is-a-doctor", + "is-a-financialadvisor", + "is-a-geek", + "is-a-green", + "is-a-guru", + "is-a-hard-worker", + "is-a-hunter", + "is-a-landscaper", + "is-a-lawyer", + "is-a-liberal", + "is-a-libertarian", + "is-a-llama", + "is-a-musician", + "is-a-nascarfan", + "is-a-nurse", + "is-a-painter", + "is-a-personaltrainer", + "is-a-photographer", + "is-a-player", + "is-a-republican", + "is-a-rockstar", + "is-a-socialist", + "is-a-student", + "is-a-teacher", + "is-a-techie", + "is-a-therapist", + "is-an-accountant", + "is-an-actor", + "is-an-actress", + "is-an-anarchist", + "is-an-artist", + "is-an-engineer", + "is-an-entertainer", + "is-certified", + "is-gone", + "is-into-anime", + "is-into-cars", + "is-into-cartoons", + "is-into-games", + "is-leet", + "is-not-certified", + "is-slick", + "is-uberleet", + "is-with-theband", + "isa-geek", + "isa-hockeynut", + "issmarterthanyou", + "jpn", + "kr", + "likes-pie", + "likescandy", + "logoip", + "meteorapp", + "mex", + "myactivedirectory", + "mydrobo", + "mysecuritycamera", + "myshopblocks", + "myvnc", + "neat-url", + "net-freaks", + "nfshost", + "no", + "on-aptible", + "onthewifi", + "operaunite", + "outsystemscloud", + "ownprovider", + "pagefrontapp", + "pagespeedmobilizer", + "pgfog", + "point2this", + "prgmr", + "qa2", + "qc", + "quicksytes", + "rackmaze", + "rhcloud", + "ro", + "ru", + "sa", + "saves-the-whales", + "se", + "securitytactics", + "selfip", + "sells-for-less", + "sells-for-u", + "servebbs", + "servebeer", + "servecounterstrike", + "serveexchange", + "serveftp", + "servegame", + "servehalflife", + "servehttp", + "servehumour", + "serveirc", + "servemp3", + "servep2p", + "servepics", + "servequake", + "servesarcasm", + "simple-url", + "sinaapp", + "space-to-rent", + "stufftoread", + "teaches-yoga", + "townnews-staging", + "uk", + "unusualperson", + "us", + "uy", + "vipsinaapp", + "withgoogle", + "withyoutube", + "workisboring", + "writesthisblog", + "xenapponazure", + "yolasite", + "za", + "ap-northeast-2", + "compute", + "compute-1", + "elb", + "eu-central-1", + "s3", + "s3-ap-northeast-1", + "s3-ap-northeast-2", + "s3-ap-southeast-1", + "s3-ap-southeast-2", + "s3-eu-central-1", + "s3-eu-west-1", + "s3-external-1", + "s3-external-2", + "s3-fips-us-gov-west-1", + "s3-sa-east-1", + "s3-us-gov-west-1", + "s3-us-west-1", + "s3-us-west-2", + "us-east-1", + "s3", + "ap-northeast-1", + 
"ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "eu-central-1", + "eu-west-1", + "sa-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", + "z-1", + "z-2", + "s3", + "alpha", + "beta", + "eu-1", + "eu-2", + "us-1", + "us-2", + "apps", + "api", + "ext", + "gist", + "eu", + "xen", + "ac", + "co", + "ed", + "fi", + "go", + "or", + "sa", + "com", + "edu", + "gov", + "inf", + "net", + "org", + "blogspot", + "com", + "edu", + "net", + "org", + "ath", + "gov", + "ac", + "biz", + "com", + "ekloges", + "gov", + "ltd", + "name", + "net", + "org", + "parliament", + "press", + "pro", + "tm", + "blogspot", + "blogspot", + "co", + "e4", + "realm", + "blogspot", + "com", + "dnshome", + "fuettertdasnetz", + "goip", + "isteingeek", + "istmein", + "lebtimnetz", + "leitungsen", + "logoip", + "traeumtgerade", + "biz", + "blogspot", + "co", + "firm", + "reg", + "store", + "com", + "edu", + "gov", + "net", + "org", + "art", + "com", + "edu", + "gob", + "gov", + "mil", + "net", + "org", + "sld", + "web", + "art", + "asso", + "com", + "edu", + "gov", + "net", + "org", + "pol", + "com", + "edu", + "fin", + "gob", + "gov", + "info", + "k12", + "med", + "mil", + "net", + "org", + "pro", + "aip", + "com", + "edu", + "fie", + "gov", + "lib", + "med", + "org", + "pri", + "riik", + "blogspot", + "com", + "edu", + "eun", + "gov", + "mil", + "name", + "net", + "org", + "sci", + "blogspot", + "com", + "edu", + "gob", + "nom", + "org", + "blogspot", + "compute", + "biz", + "com", + "edu", + "gov", + "info", + "name", + "net", + "org", + "mycd", + "aland", + "blogspot", + "dy", + "iki", + "aeroport", + "assedic", + "asso", + "avocat", + "avoues", + "blogspot", + "cci", + "chambagri", + "chirurgiens-dentistes", + "chirurgiens-dentistes-en-france", + "com", + "experts-comptables", + "fbx-os", + "fbxos", + "freebox-os", + "freeboxos", + "geometre-expert", + "gouv", + "greta", + "huissier-justice", + "medecin", + "nom", + "notaires", + "pharmacien", + "port", + "prd", + "presse", + "tm", + "veterinaire", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "pvt", + "co", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "org", + "com", + "edu", + "gov", + "ltd", + "mod", + "org", + "co", + "com", + "edu", + "net", + "org", + "ac", + "com", + "edu", + "gov", + "net", + "org", + "asso", + "com", + "edu", + "mobi", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gob", + "ind", + "mil", + "net", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "idv", + "inc", + "ltd", + "net", + "org", + "xn--55qx5d", + "xn--ciqpn", + "xn--gmq050i", + "xn--gmqw5a", + "xn--io0a7i", + "xn--lcvr32d", + "xn--mk0axi", + "xn--mxtq1m", + "xn--od0alg", + "xn--od0aq3b", + "xn--tn0ag", + "xn--uc0atv", + "xn--uc0ay4a", + "xn--wcvs22d", + "xn--zf0avx", + "com", + "edu", + "gob", + "mil", + "net", + "org", + "blogspot", + "com", + "from", + "iz", + "name", + "adult", + "art", + "asso", + "com", + "coop", + "edu", + "firm", + "gouv", + "info", + "med", + "net", + "org", + "perso", + "pol", + "pro", + "rel", + "shop", + "2000", + "agrar", + "blogspot", + "bolt", + "casino", + "city", + "co", + "erotica", + "erotika", + "film", + "forum", + "games", + "hotel", + "info", + "ingatlan", + "jogasz", + "konyvelo", + "lakas", + "media", + "news", + "org", + "priv", + "reklam", + "sex", + "shop", + "sport", + "suli", + "szex", + "tm", + "tozsde", + "utazas", + "video", + "ac", + "biz", + "co", + "desa", + "go", + "mil", + "my", + "net", + 
"or", + "sch", + "web", + "blogspot", + "blogspot", + "gov", + "ac", + "co", + "gov", + "idf", + "k12", + "muni", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "net", + "org", + "tt", + "tv", + "ltd", + "plc", + "ac", + "blogspot", + "co", + "edu", + "firm", + "gen", + "gov", + "ind", + "mil", + "net", + "nic", + "org", + "res", + "barrel-of-knowledge", + "barrell-of-knowledge", + "dvrcam", + "dyndns", + "for-our", + "groks-the", + "groks-this", + "here-for-more", + "ilovecollege", + "knowsitall", + "no-ip", + "nsupdate", + "selfip", + "webhop", + "eu", + "boxfuse", + "com", + "dedyn", + "github", + "hasura-app", + "hzc", + "ngrok", + "nid", + "pantheonsite", + "sandcats", + "spacekit", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "ac", + "co", + "gov", + "id", + "net", + "org", + "sch", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "blogspot", + "com", + "cupcake", + "edu", + "gov", + "int", + "net", + "org", + "abr", + "abruzzo", + "ag", + "agrigento", + "al", + "alessandria", + "alto-adige", + "altoadige", + "an", + "ancona", + "andria-barletta-trani", + "andria-trani-barletta", + "andriabarlettatrani", + "andriatranibarletta", + "ao", + "aosta", + "aosta-valley", + "aostavalley", + "aoste", + "ap", + "aq", + "aquila", + "ar", + "arezzo", + "ascoli-piceno", + "ascolipiceno", + "asti", + "at", + "av", + "avellino", + "ba", + "balsan", + "bari", + "barletta-trani-andria", + "barlettatraniandria", + "bas", + "basilicata", + "belluno", + "benevento", + "bergamo", + "bg", + "bi", + "biella", + "bl", + "blogspot", + "bn", + "bo", + "bologna", + "bolzano", + "bozen", + "br", + "brescia", + "brindisi", + "bs", + "bt", + "bz", + "ca", + "cagliari", + "cal", + "calabria", + "caltanissetta", + "cam", + "campania", + "campidano-medio", + "campidanomedio", + "campobasso", + "carbonia-iglesias", + "carboniaiglesias", + "carrara-massa", + "carraramassa", + "caserta", + "catania", + "catanzaro", + "cb", + "ce", + "cesena-forli", + "cesenaforli", + "ch", + "chieti", + "ci", + "cl", + "cn", + "co", + "como", + "cosenza", + "cr", + "cremona", + "crotone", + "cs", + "ct", + "cuneo", + "cz", + "dell-ogliastra", + "dellogliastra", + "edu", + "emilia-romagna", + "emiliaromagna", + "emr", + "en", + "enna", + "fc", + "fe", + "fermo", + "ferrara", + "fg", + "fi", + "firenze", + "florence", + "fm", + "foggia", + "forli-cesena", + "forlicesena", + "fr", + "friuli-v-giulia", + "friuli-ve-giulia", + "friuli-vegiulia", + "friuli-venezia-giulia", + "friuli-veneziagiulia", + "friuli-vgiulia", + "friuliv-giulia", + "friulive-giulia", + "friulivegiulia", + "friulivenezia-giulia", + "friuliveneziagiulia", + "friulivgiulia", + "frosinone", + "fvg", + "ge", + "genoa", + "genova", + "go", + "gorizia", + "gov", + "gr", + "grosseto", + "iglesias-carbonia", + "iglesiascarbonia", + "im", + "imperia", + "is", + "isernia", + "kr", + "la-spezia", + "laquila", + "laspezia", + "latina", + "laz", + "lazio", + "lc", + "le", + "lecce", + "lecco", + "li", + "lig", + "liguria", + "livorno", + "lo", + "lodi", + "lom", + "lombardia", + "lombardy", + "lt", + "lu", + "lucania", + "lucca", + "macerata", + "mantova", + "mar", + "marche", + "massa-carrara", + "massacarrara", + "matera", + "mb", + "mc", + "me", + "medio-campidano", + "mediocampidano", + "messina", + "mi", + "milan", + "milano", + "mn", + "mo", + "modena", + "mol", + "molise", + "monza", + "monza-brianza", + "monza-e-della-brianza", + "monzabrianza", + "monzaebrianza", + "monzaedellabrianza", + "ms", + "mt", + "na", + "naples", + "napoli", + "no", + "novara", 
+ "nu", + "nuoro", + "og", + "ogliastra", + "olbia-tempio", + "olbiatempio", + "or", + "oristano", + "ot", + "pa", + "padova", + "padua", + "palermo", + "parma", + "pavia", + "pc", + "pd", + "pe", + "perugia", + "pesaro-urbino", + "pesarourbino", + "pescara", + "pg", + "pi", + "piacenza", + "piedmont", + "piemonte", + "pisa", + "pistoia", + "pmn", + "pn", + "po", + "pordenone", + "potenza", + "pr", + "prato", + "pt", + "pu", + "pug", + "puglia", + "pv", + "pz", + "ra", + "ragusa", + "ravenna", + "rc", + "re", + "reggio-calabria", + "reggio-emilia", + "reggiocalabria", + "reggioemilia", + "rg", + "ri", + "rieti", + "rimini", + "rm", + "rn", + "ro", + "roma", + "rome", + "rovigo", + "sa", + "salerno", + "sar", + "sardegna", + "sardinia", + "sassari", + "savona", + "si", + "sic", + "sicilia", + "sicily", + "siena", + "siracusa", + "so", + "sondrio", + "sp", + "sr", + "ss", + "suedtirol", + "sv", + "ta", + "taa", + "taranto", + "te", + "tempio-olbia", + "tempioolbia", + "teramo", + "terni", + "tn", + "to", + "torino", + "tos", + "toscana", + "tp", + "tr", + "trani-andria-barletta", + "trani-barletta-andria", + "traniandriabarletta", + "tranibarlettaandria", + "trapani", + "trentino", + "trentino-a-adige", + "trentino-aadige", + "trentino-alto-adige", + "trentino-altoadige", + "trentino-s-tirol", + "trentino-stirol", + "trentino-sud-tirol", + "trentino-sudtirol", + "trentino-sued-tirol", + "trentino-suedtirol", + "trentinoa-adige", + "trentinoaadige", + "trentinoalto-adige", + "trentinoaltoadige", + "trentinos-tirol", + "trentinostirol", + "trentinosud-tirol", + "trentinosudtirol", + "trentinosued-tirol", + "trentinosuedtirol", + "trento", + "treviso", + "trieste", + "ts", + "turin", + "tuscany", + "tv", + "ud", + "udine", + "umb", + "umbria", + "urbino-pesaro", + "urbinopesaro", + "va", + "val-d-aosta", + "val-daosta", + "vald-aosta", + "valdaosta", + "valle-aosta", + "valle-d-aosta", + "valle-daosta", + "valleaosta", + "valled-aosta", + "valledaosta", + "vallee-aoste", + "valleeaoste", + "vao", + "varese", + "vb", + "vc", + "vda", + "ve", + "ven", + "veneto", + "venezia", + "venice", + "verbania", + "vercelli", + "verona", + "vi", + "vibo-valentia", + "vibovalentia", + "vicenza", + "viterbo", + "vr", + "vs", + "vt", + "vv", + "co", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "sch", + "ac", + "ad", + "aichi", + "akita", + "aomori", + "blogspot", + "chiba", + "co", + "ed", + "ehime", + "fukui", + "fukuoka", + "fukushima", + "gifu", + "go", + "gr", + "gunma", + "hiroshima", + "hokkaido", + "hyogo", + "ibaraki", + "ishikawa", + "iwate", + "kagawa", + "kagoshima", + "kanagawa", + "kawasaki", + "kitakyushu", + "kobe", + "kochi", + "kumamoto", + "kyoto", + "lg", + "mie", + "miyagi", + "miyazaki", + "nagano", + "nagasaki", + "nagoya", + "nara", + "ne", + "niigata", + "oita", + "okayama", + "okinawa", + "or", + "osaka", + "saga", + "saitama", + "sapporo", + "sendai", + "shiga", + "shimane", + "shizuoka", + "tochigi", + "tokushima", + "tokyo", + "tottori", + "toyama", + "wakayama", + "xn--0trq7p7nn", + "xn--1ctwo", + "xn--1lqs03n", + "xn--1lqs71d", + "xn--2m4a15e", + "xn--32vp30h", + "xn--4it168d", + "xn--4it797k", + "xn--4pvxs", + "xn--5js045d", + "xn--5rtp49c", + "xn--5rtq34k", + "xn--6btw5a", + "xn--6orx2r", + "xn--7t0a264c", + "xn--8ltr62k", + "xn--8pvr4u", + "xn--c3s14m", + "xn--d5qv7z876c", + "xn--djrs72d6uy", + "xn--djty4k", + "xn--efvn9s", + "xn--ehqz56n", + "xn--elqq16h", + "xn--f6qx53a", + "xn--k7yn95e", + "xn--kbrq7o", + "xn--klt787d", + "xn--kltp7d", 
+ "xn--kltx9a", + "xn--klty5x", + "xn--mkru45i", + "xn--nit225k", + "xn--ntso0iqx3a", + "xn--ntsq17g", + "xn--pssu33l", + "xn--qqqt11m", + "xn--rht27z", + "xn--rht3d", + "xn--rht61e", + "xn--rny31h", + "xn--tor131o", + "xn--uist22h", + "xn--uisz3g", + "xn--uuwu58a", + "xn--vgu402c", + "xn--zbx025d", + "yamagata", + "yamaguchi", + "yamanashi", + "yokohama", + "aisai", + "ama", + "anjo", + "asuke", + "chiryu", + "chita", + "fuso", + "gamagori", + "handa", + "hazu", + "hekinan", + "higashiura", + "ichinomiya", + "inazawa", + "inuyama", + "isshiki", + "iwakura", + "kanie", + "kariya", + "kasugai", + "kira", + "kiyosu", + "komaki", + "konan", + "kota", + "mihama", + "miyoshi", + "nishio", + "nisshin", + "obu", + "oguchi", + "oharu", + "okazaki", + "owariasahi", + "seto", + "shikatsu", + "shinshiro", + "shitara", + "tahara", + "takahama", + "tobishima", + "toei", + "togo", + "tokai", + "tokoname", + "toyoake", + "toyohashi", + "toyokawa", + "toyone", + "toyota", + "tsushima", + "yatomi", + "akita", + "daisen", + "fujisato", + "gojome", + "hachirogata", + "happou", + "higashinaruse", + "honjo", + "honjyo", + "ikawa", + "kamikoani", + "kamioka", + "katagami", + "kazuno", + "kitaakita", + "kosaka", + "kyowa", + "misato", + "mitane", + "moriyoshi", + "nikaho", + "noshiro", + "odate", + "oga", + "ogata", + "semboku", + "yokote", + "yurihonjo", + "aomori", + "gonohe", + "hachinohe", + "hashikami", + "hiranai", + "hirosaki", + "itayanagi", + "kuroishi", + "misawa", + "mutsu", + "nakadomari", + "noheji", + "oirase", + "owani", + "rokunohe", + "sannohe", + "shichinohe", + "shingo", + "takko", + "towada", + "tsugaru", + "tsuruta", + "abiko", + "asahi", + "chonan", + "chosei", + "choshi", + "chuo", + "funabashi", + "futtsu", + "hanamigawa", + "ichihara", + "ichikawa", + "ichinomiya", + "inzai", + "isumi", + "kamagaya", + "kamogawa", + "kashiwa", + "katori", + "katsuura", + "kimitsu", + "kisarazu", + "kozaki", + "kujukuri", + "kyonan", + "matsudo", + "midori", + "mihama", + "minamiboso", + "mobara", + "mutsuzawa", + "nagara", + "nagareyama", + "narashino", + "narita", + "noda", + "oamishirasato", + "omigawa", + "onjuku", + "otaki", + "sakae", + "sakura", + "shimofusa", + "shirako", + "shiroi", + "shisui", + "sodegaura", + "sosa", + "tako", + "tateyama", + "togane", + "tohnosho", + "tomisato", + "urayasu", + "yachimata", + "yachiyo", + "yokaichiba", + "yokoshibahikari", + "yotsukaido", + "ainan", + "honai", + "ikata", + "imabari", + "iyo", + "kamijima", + "kihoku", + "kumakogen", + "masaki", + "matsuno", + "matsuyama", + "namikata", + "niihama", + "ozu", + "saijo", + "seiyo", + "shikokuchuo", + "tobe", + "toon", + "uchiko", + "uwajima", + "yawatahama", + "echizen", + "eiheiji", + "fukui", + "ikeda", + "katsuyama", + "mihama", + "minamiechizen", + "obama", + "ohi", + "ono", + "sabae", + "sakai", + "takahama", + "tsuruga", + "wakasa", + "ashiya", + "buzen", + "chikugo", + "chikuho", + "chikujo", + "chikushino", + "chikuzen", + "chuo", + "dazaifu", + "fukuchi", + "hakata", + "higashi", + "hirokawa", + "hisayama", + "iizuka", + "inatsuki", + "kaho", + "kasuga", + "kasuya", + "kawara", + "keisen", + "koga", + "kurate", + "kurogi", + "kurume", + "minami", + "miyako", + "miyama", + "miyawaka", + "mizumaki", + "munakata", + "nakagawa", + "nakama", + "nishi", + "nogata", + "ogori", + "okagaki", + "okawa", + "oki", + "omuta", + "onga", + "onojo", + "oto", + "saigawa", + "sasaguri", + "shingu", + "shinyoshitomi", + "shonai", + "soeda", + "sue", + "tachiarai", + "tagawa", + "takata", + "toho", + "toyotsu", + "tsuiki", + 
"ukiha", + "umi", + "usui", + "yamada", + "yame", + "yanagawa", + "yukuhashi", + "aizubange", + "aizumisato", + "aizuwakamatsu", + "asakawa", + "bandai", + "date", + "fukushima", + "furudono", + "futaba", + "hanawa", + "higashi", + "hirata", + "hirono", + "iitate", + "inawashiro", + "ishikawa", + "iwaki", + "izumizaki", + "kagamiishi", + "kaneyama", + "kawamata", + "kitakata", + "kitashiobara", + "koori", + "koriyama", + "kunimi", + "miharu", + "mishima", + "namie", + "nango", + "nishiaizu", + "nishigo", + "okuma", + "omotego", + "ono", + "otama", + "samegawa", + "shimogo", + "shirakawa", + "showa", + "soma", + "sukagawa", + "taishin", + "tamakawa", + "tanagura", + "tenei", + "yabuki", + "yamato", + "yamatsuri", + "yanaizu", + "yugawa", + "anpachi", + "ena", + "gifu", + "ginan", + "godo", + "gujo", + "hashima", + "hichiso", + "hida", + "higashishirakawa", + "ibigawa", + "ikeda", + "kakamigahara", + "kani", + "kasahara", + "kasamatsu", + "kawaue", + "kitagata", + "mino", + "minokamo", + "mitake", + "mizunami", + "motosu", + "nakatsugawa", + "ogaki", + "sakahogi", + "seki", + "sekigahara", + "shirakawa", + "tajimi", + "takayama", + "tarui", + "toki", + "tomika", + "wanouchi", + "yamagata", + "yaotsu", + "yoro", + "annaka", + "chiyoda", + "fujioka", + "higashiagatsuma", + "isesaki", + "itakura", + "kanna", + "kanra", + "katashina", + "kawaba", + "kiryu", + "kusatsu", + "maebashi", + "meiwa", + "midori", + "minakami", + "naganohara", + "nakanojo", + "nanmoku", + "numata", + "oizumi", + "ora", + "ota", + "shibukawa", + "shimonita", + "shinto", + "showa", + "takasaki", + "takayama", + "tamamura", + "tatebayashi", + "tomioka", + "tsukiyono", + "tsumagoi", + "ueno", + "yoshioka", + "asaminami", + "daiwa", + "etajima", + "fuchu", + "fukuyama", + "hatsukaichi", + "higashihiroshima", + "hongo", + "jinsekikogen", + "kaita", + "kui", + "kumano", + "kure", + "mihara", + "miyoshi", + "naka", + "onomichi", + "osakikamijima", + "otake", + "saka", + "sera", + "seranishi", + "shinichi", + "shobara", + "takehara", + "abashiri", + "abira", + "aibetsu", + "akabira", + "akkeshi", + "asahikawa", + "ashibetsu", + "ashoro", + "assabu", + "atsuma", + "bibai", + "biei", + "bifuka", + "bihoro", + "biratori", + "chippubetsu", + "chitose", + "date", + "ebetsu", + "embetsu", + "eniwa", + "erimo", + "esan", + "esashi", + "fukagawa", + "fukushima", + "furano", + "furubira", + "haboro", + "hakodate", + "hamatonbetsu", + "hidaka", + "higashikagura", + "higashikawa", + "hiroo", + "hokuryu", + "hokuto", + "honbetsu", + "horokanai", + "horonobe", + "ikeda", + "imakane", + "ishikari", + "iwamizawa", + "iwanai", + "kamifurano", + "kamikawa", + "kamishihoro", + "kamisunagawa", + "kamoenai", + "kayabe", + "kembuchi", + "kikonai", + "kimobetsu", + "kitahiroshima", + "kitami", + "kiyosato", + "koshimizu", + "kunneppu", + "kuriyama", + "kuromatsunai", + "kushiro", + "kutchan", + "kyowa", + "mashike", + "matsumae", + "mikasa", + "minamifurano", + "mombetsu", + "moseushi", + "mukawa", + "muroran", + "naie", + "nakagawa", + "nakasatsunai", + "nakatombetsu", + "nanae", + "nanporo", + "nayoro", + "nemuro", + "niikappu", + "niki", + "nishiokoppe", + "noboribetsu", + "numata", + "obihiro", + "obira", + "oketo", + "okoppe", + "otaru", + "otobe", + "otofuke", + "otoineppu", + "oumu", + "ozora", + "pippu", + "rankoshi", + "rebun", + "rikubetsu", + "rishiri", + "rishirifuji", + "saroma", + "sarufutsu", + "shakotan", + "shari", + "shibecha", + "shibetsu", + "shikabe", + "shikaoi", + "shimamaki", + "shimizu", + "shimokawa", + "shinshinotsu", + 
"shintoku", + "shiranuka", + "shiraoi", + "shiriuchi", + "sobetsu", + "sunagawa", + "taiki", + "takasu", + "takikawa", + "takinoue", + "teshikaga", + "tobetsu", + "tohma", + "tomakomai", + "tomari", + "toya", + "toyako", + "toyotomi", + "toyoura", + "tsubetsu", + "tsukigata", + "urakawa", + "urausu", + "uryu", + "utashinai", + "wakkanai", + "wassamu", + "yakumo", + "yoichi", + "aioi", + "akashi", + "ako", + "amagasaki", + "aogaki", + "asago", + "ashiya", + "awaji", + "fukusaki", + "goshiki", + "harima", + "himeji", + "ichikawa", + "inagawa", + "itami", + "kakogawa", + "kamigori", + "kamikawa", + "kasai", + "kasuga", + "kawanishi", + "miki", + "minamiawaji", + "nishinomiya", + "nishiwaki", + "ono", + "sanda", + "sannan", + "sasayama", + "sayo", + "shingu", + "shinonsen", + "shiso", + "sumoto", + "taishi", + "taka", + "takarazuka", + "takasago", + "takino", + "tamba", + "tatsuno", + "toyooka", + "yabu", + "yashiro", + "yoka", + "yokawa", + "ami", + "asahi", + "bando", + "chikusei", + "daigo", + "fujishiro", + "hitachi", + "hitachinaka", + "hitachiomiya", + "hitachiota", + "ibaraki", + "ina", + "inashiki", + "itako", + "iwama", + "joso", + "kamisu", + "kasama", + "kashima", + "kasumigaura", + "koga", + "miho", + "mito", + "moriya", + "naka", + "namegata", + "oarai", + "ogawa", + "omitama", + "ryugasaki", + "sakai", + "sakuragawa", + "shimodate", + "shimotsuma", + "shirosato", + "sowa", + "suifu", + "takahagi", + "tamatsukuri", + "tokai", + "tomobe", + "tone", + "toride", + "tsuchiura", + "tsukuba", + "uchihara", + "ushiku", + "yachiyo", + "yamagata", + "yawara", + "yuki", + "anamizu", + "hakui", + "hakusan", + "kaga", + "kahoku", + "kanazawa", + "kawakita", + "komatsu", + "nakanoto", + "nanao", + "nomi", + "nonoichi", + "noto", + "shika", + "suzu", + "tsubata", + "tsurugi", + "uchinada", + "wajima", + "fudai", + "fujisawa", + "hanamaki", + "hiraizumi", + "hirono", + "ichinohe", + "ichinoseki", + "iwaizumi", + "iwate", + "joboji", + "kamaishi", + "kanegasaki", + "karumai", + "kawai", + "kitakami", + "kuji", + "kunohe", + "kuzumaki", + "miyako", + "mizusawa", + "morioka", + "ninohe", + "noda", + "ofunato", + "oshu", + "otsuchi", + "rikuzentakata", + "shiwa", + "shizukuishi", + "sumita", + "tanohata", + "tono", + "yahaba", + "yamada", + "ayagawa", + "higashikagawa", + "kanonji", + "kotohira", + "manno", + "marugame", + "mitoyo", + "naoshima", + "sanuki", + "tadotsu", + "takamatsu", + "tonosho", + "uchinomi", + "utazu", + "zentsuji", + "akune", + "amami", + "hioki", + "isa", + "isen", + "izumi", + "kagoshima", + "kanoya", + "kawanabe", + "kinko", + "kouyama", + "makurazaki", + "matsumoto", + "minamitane", + "nakatane", + "nishinoomote", + "satsumasendai", + "soo", + "tarumizu", + "yusui", + "aikawa", + "atsugi", + "ayase", + "chigasaki", + "ebina", + "fujisawa", + "hadano", + "hakone", + "hiratsuka", + "isehara", + "kaisei", + "kamakura", + "kiyokawa", + "matsuda", + "minamiashigara", + "miura", + "nakai", + "ninomiya", + "odawara", + "oi", + "oiso", + "sagamihara", + "samukawa", + "tsukui", + "yamakita", + "yamato", + "yokosuka", + "yugawara", + "zama", + "zushi", + "city", + "city", + "city", + "aki", + "geisei", + "hidaka", + "higashitsuno", + "ino", + "kagami", + "kami", + "kitagawa", + "kochi", + "mihara", + "motoyama", + "muroto", + "nahari", + "nakamura", + "nankoku", + "nishitosa", + "niyodogawa", + "ochi", + "okawa", + "otoyo", + "otsuki", + "sakawa", + "sukumo", + "susaki", + "tosa", + "tosashimizu", + "toyo", + "tsuno", + "umaji", + "yasuda", + "yusuhara", + "amakusa", + "arao", + 
"aso", + "choyo", + "gyokuto", + "hitoyoshi", + "kamiamakusa", + "kashima", + "kikuchi", + "kumamoto", + "mashiki", + "mifune", + "minamata", + "minamioguni", + "nagasu", + "nishihara", + "oguni", + "ozu", + "sumoto", + "takamori", + "uki", + "uto", + "yamaga", + "yamato", + "yatsushiro", + "ayabe", + "fukuchiyama", + "higashiyama", + "ide", + "ine", + "joyo", + "kameoka", + "kamo", + "kita", + "kizu", + "kumiyama", + "kyotamba", + "kyotanabe", + "kyotango", + "maizuru", + "minami", + "minamiyamashiro", + "miyazu", + "muko", + "nagaokakyo", + "nakagyo", + "nantan", + "oyamazaki", + "sakyo", + "seika", + "tanabe", + "uji", + "ujitawara", + "wazuka", + "yamashina", + "yawata", + "asahi", + "inabe", + "ise", + "kameyama", + "kawagoe", + "kiho", + "kisosaki", + "kiwa", + "komono", + "kumano", + "kuwana", + "matsusaka", + "meiwa", + "mihama", + "minamiise", + "misugi", + "miyama", + "nabari", + "shima", + "suzuka", + "tado", + "taiki", + "taki", + "tamaki", + "toba", + "tsu", + "udono", + "ureshino", + "watarai", + "yokkaichi", + "furukawa", + "higashimatsushima", + "ishinomaki", + "iwanuma", + "kakuda", + "kami", + "kawasaki", + "marumori", + "matsushima", + "minamisanriku", + "misato", + "murata", + "natori", + "ogawara", + "ohira", + "onagawa", + "osaki", + "rifu", + "semine", + "shibata", + "shichikashuku", + "shikama", + "shiogama", + "shiroishi", + "tagajo", + "taiwa", + "tome", + "tomiya", + "wakuya", + "watari", + "yamamoto", + "zao", + "aya", + "ebino", + "gokase", + "hyuga", + "kadogawa", + "kawaminami", + "kijo", + "kitagawa", + "kitakata", + "kitaura", + "kobayashi", + "kunitomi", + "kushima", + "mimata", + "miyakonojo", + "miyazaki", + "morotsuka", + "nichinan", + "nishimera", + "nobeoka", + "saito", + "shiiba", + "shintomi", + "takaharu", + "takanabe", + "takazaki", + "tsuno", + "achi", + "agematsu", + "anan", + "aoki", + "asahi", + "azumino", + "chikuhoku", + "chikuma", + "chino", + "fujimi", + "hakuba", + "hara", + "hiraya", + "iida", + "iijima", + "iiyama", + "iizuna", + "ikeda", + "ikusaka", + "ina", + "karuizawa", + "kawakami", + "kiso", + "kisofukushima", + "kitaaiki", + "komagane", + "komoro", + "matsukawa", + "matsumoto", + "miasa", + "minamiaiki", + "minamimaki", + "minamiminowa", + "minowa", + "miyada", + "miyota", + "mochizuki", + "nagano", + "nagawa", + "nagiso", + "nakagawa", + "nakano", + "nozawaonsen", + "obuse", + "ogawa", + "okaya", + "omachi", + "omi", + "ookuwa", + "ooshika", + "otaki", + "otari", + "sakae", + "sakaki", + "saku", + "sakuho", + "shimosuwa", + "shinanomachi", + "shiojiri", + "suwa", + "suzaka", + "takagi", + "takamori", + "takayama", + "tateshina", + "tatsuno", + "togakushi", + "togura", + "tomi", + "ueda", + "wada", + "yamagata", + "yamanouchi", + "yasaka", + "yasuoka", + "chijiwa", + "futsu", + "goto", + "hasami", + "hirado", + "iki", + "isahaya", + "kawatana", + "kuchinotsu", + "matsuura", + "nagasaki", + "obama", + "omura", + "oseto", + "saikai", + "sasebo", + "seihi", + "shimabara", + "shinkamigoto", + "togitsu", + "tsushima", + "unzen", + "city", + "ando", + "gose", + "heguri", + "higashiyoshino", + "ikaruga", + "ikoma", + "kamikitayama", + "kanmaki", + "kashiba", + "kashihara", + "katsuragi", + "kawai", + "kawakami", + "kawanishi", + "koryo", + "kurotaki", + "mitsue", + "miyake", + "nara", + "nosegawa", + "oji", + "ouda", + "oyodo", + "sakurai", + "sango", + "shimoichi", + "shimokitayama", + "shinjo", + "soni", + "takatori", + "tawaramoto", + "tenkawa", + "tenri", + "uda", + "yamatokoriyama", + "yamatotakada", + "yamazoe", + "yoshino", + 
"aga", + "agano", + "gosen", + "itoigawa", + "izumozaki", + "joetsu", + "kamo", + "kariwa", + "kashiwazaki", + "minamiuonuma", + "mitsuke", + "muika", + "murakami", + "myoko", + "nagaoka", + "niigata", + "ojiya", + "omi", + "sado", + "sanjo", + "seiro", + "seirou", + "sekikawa", + "shibata", + "tagami", + "tainai", + "tochio", + "tokamachi", + "tsubame", + "tsunan", + "uonuma", + "yahiko", + "yoita", + "yuzawa", + "beppu", + "bungoono", + "bungotakada", + "hasama", + "hiji", + "himeshima", + "hita", + "kamitsue", + "kokonoe", + "kuju", + "kunisaki", + "kusu", + "oita", + "saiki", + "taketa", + "tsukumi", + "usa", + "usuki", + "yufu", + "akaiwa", + "asakuchi", + "bizen", + "hayashima", + "ibara", + "kagamino", + "kasaoka", + "kibichuo", + "kumenan", + "kurashiki", + "maniwa", + "misaki", + "nagi", + "niimi", + "nishiawakura", + "okayama", + "satosho", + "setouchi", + "shinjo", + "shoo", + "soja", + "takahashi", + "tamano", + "tsuyama", + "wake", + "yakage", + "aguni", + "ginowan", + "ginoza", + "gushikami", + "haebaru", + "higashi", + "hirara", + "iheya", + "ishigaki", + "ishikawa", + "itoman", + "izena", + "kadena", + "kin", + "kitadaito", + "kitanakagusuku", + "kumejima", + "kunigami", + "minamidaito", + "motobu", + "nago", + "naha", + "nakagusuku", + "nakijin", + "nanjo", + "nishihara", + "ogimi", + "okinawa", + "onna", + "shimoji", + "taketomi", + "tarama", + "tokashiki", + "tomigusuku", + "tonaki", + "urasoe", + "uruma", + "yaese", + "yomitan", + "yonabaru", + "yonaguni", + "zamami", + "abeno", + "chihayaakasaka", + "chuo", + "daito", + "fujiidera", + "habikino", + "hannan", + "higashiosaka", + "higashisumiyoshi", + "higashiyodogawa", + "hirakata", + "ibaraki", + "ikeda", + "izumi", + "izumiotsu", + "izumisano", + "kadoma", + "kaizuka", + "kanan", + "kashiwara", + "katano", + "kawachinagano", + "kishiwada", + "kita", + "kumatori", + "matsubara", + "minato", + "minoh", + "misaki", + "moriguchi", + "neyagawa", + "nishi", + "nose", + "osakasayama", + "sakai", + "sayama", + "sennan", + "settsu", + "shijonawate", + "shimamoto", + "suita", + "tadaoka", + "taishi", + "tajiri", + "takaishi", + "takatsuki", + "tondabayashi", + "toyonaka", + "toyono", + "yao", + "ariake", + "arita", + "fukudomi", + "genkai", + "hamatama", + "hizen", + "imari", + "kamimine", + "kanzaki", + "karatsu", + "kashima", + "kitagata", + "kitahata", + "kiyama", + "kouhoku", + "kyuragi", + "nishiarita", + "ogi", + "omachi", + "ouchi", + "saga", + "shiroishi", + "taku", + "tara", + "tosu", + "yoshinogari", + "arakawa", + "asaka", + "chichibu", + "fujimi", + "fujimino", + "fukaya", + "hanno", + "hanyu", + "hasuda", + "hatogaya", + "hatoyama", + "hidaka", + "higashichichibu", + "higashimatsuyama", + "honjo", + "ina", + "iruma", + "iwatsuki", + "kamiizumi", + "kamikawa", + "kamisato", + "kasukabe", + "kawagoe", + "kawaguchi", + "kawajima", + "kazo", + "kitamoto", + "koshigaya", + "kounosu", + "kuki", + "kumagaya", + "matsubushi", + "minano", + "misato", + "miyashiro", + "miyoshi", + "moroyama", + "nagatoro", + "namegawa", + "niiza", + "ogano", + "ogawa", + "ogose", + "okegawa", + "omiya", + "otaki", + "ranzan", + "ryokami", + "saitama", + "sakado", + "satte", + "sayama", + "shiki", + "shiraoka", + "soka", + "sugito", + "toda", + "tokigawa", + "tokorozawa", + "tsurugashima", + "urawa", + "warabi", + "yashio", + "yokoze", + "yono", + "yorii", + "yoshida", + "yoshikawa", + "yoshimi", + "city", + "city", + "aisho", + "gamo", + "higashiomi", + "hikone", + "koka", + "konan", + "kosei", + "koto", + "kusatsu", + "maibara", + 
"moriyama", + "nagahama", + "nishiazai", + "notogawa", + "omihachiman", + "otsu", + "ritto", + "ryuoh", + "takashima", + "takatsuki", + "torahime", + "toyosato", + "yasu", + "akagi", + "ama", + "gotsu", + "hamada", + "higashiizumo", + "hikawa", + "hikimi", + "izumo", + "kakinoki", + "masuda", + "matsue", + "misato", + "nishinoshima", + "ohda", + "okinoshima", + "okuizumo", + "shimane", + "tamayu", + "tsuwano", + "unnan", + "yakumo", + "yasugi", + "yatsuka", + "arai", + "atami", + "fuji", + "fujieda", + "fujikawa", + "fujinomiya", + "fukuroi", + "gotemba", + "haibara", + "hamamatsu", + "higashiizu", + "ito", + "iwata", + "izu", + "izunokuni", + "kakegawa", + "kannami", + "kawanehon", + "kawazu", + "kikugawa", + "kosai", + "makinohara", + "matsuzaki", + "minamiizu", + "mishima", + "morimachi", + "nishiizu", + "numazu", + "omaezaki", + "shimada", + "shimizu", + "shimoda", + "shizuoka", + "susono", + "yaizu", + "yoshida", + "ashikaga", + "bato", + "haga", + "ichikai", + "iwafune", + "kaminokawa", + "kanuma", + "karasuyama", + "kuroiso", + "mashiko", + "mibu", + "moka", + "motegi", + "nasu", + "nasushiobara", + "nikko", + "nishikata", + "nogi", + "ohira", + "ohtawara", + "oyama", + "sakura", + "sano", + "shimotsuke", + "shioya", + "takanezawa", + "tochigi", + "tsuga", + "ujiie", + "utsunomiya", + "yaita", + "aizumi", + "anan", + "ichiba", + "itano", + "kainan", + "komatsushima", + "matsushige", + "mima", + "minami", + "miyoshi", + "mugi", + "nakagawa", + "naruto", + "sanagochi", + "shishikui", + "tokushima", + "wajiki", + "adachi", + "akiruno", + "akishima", + "aogashima", + "arakawa", + "bunkyo", + "chiyoda", + "chofu", + "chuo", + "edogawa", + "fuchu", + "fussa", + "hachijo", + "hachioji", + "hamura", + "higashikurume", + "higashimurayama", + "higashiyamato", + "hino", + "hinode", + "hinohara", + "inagi", + "itabashi", + "katsushika", + "kita", + "kiyose", + "kodaira", + "koganei", + "kokubunji", + "komae", + "koto", + "kouzushima", + "kunitachi", + "machida", + "meguro", + "minato", + "mitaka", + "mizuho", + "musashimurayama", + "musashino", + "nakano", + "nerima", + "ogasawara", + "okutama", + "ome", + "oshima", + "ota", + "setagaya", + "shibuya", + "shinagawa", + "shinjuku", + "suginami", + "sumida", + "tachikawa", + "taito", + "tama", + "toshima", + "chizu", + "hino", + "kawahara", + "koge", + "kotoura", + "misasa", + "nanbu", + "nichinan", + "sakaiminato", + "tottori", + "wakasa", + "yazu", + "yonago", + "asahi", + "fuchu", + "fukumitsu", + "funahashi", + "himi", + "imizu", + "inami", + "johana", + "kamiichi", + "kurobe", + "nakaniikawa", + "namerikawa", + "nanto", + "nyuzen", + "oyabe", + "taira", + "takaoka", + "tateyama", + "toga", + "tonami", + "toyama", + "unazuki", + "uozu", + "yamada", + "arida", + "aridagawa", + "gobo", + "hashimoto", + "hidaka", + "hirogawa", + "inami", + "iwade", + "kainan", + "kamitonda", + "katsuragi", + "kimino", + "kinokawa", + "kitayama", + "koya", + "koza", + "kozagawa", + "kudoyama", + "kushimoto", + "mihama", + "misato", + "nachikatsuura", + "shingu", + "shirahama", + "taiji", + "tanabe", + "wakayama", + "yuasa", + "yura", + "asahi", + "funagata", + "higashine", + "iide", + "kahoku", + "kaminoyama", + "kaneyama", + "kawanishi", + "mamurogawa", + "mikawa", + "murayama", + "nagai", + "nakayama", + "nanyo", + "nishikawa", + "obanazawa", + "oe", + "oguni", + "ohkura", + "oishida", + "sagae", + "sakata", + "sakegawa", + "shinjo", + "shirataka", + "shonai", + "takahata", + "tendo", + "tozawa", + "tsuruoka", + "yamagata", + "yamanobe", + "yonezawa", + "yuza", 
+ "abu", + "hagi", + "hikari", + "hofu", + "iwakuni", + "kudamatsu", + "mitou", + "nagato", + "oshima", + "shimonoseki", + "shunan", + "tabuse", + "tokuyama", + "toyota", + "ube", + "yuu", + "chuo", + "doshi", + "fuefuki", + "fujikawa", + "fujikawaguchiko", + "fujiyoshida", + "hayakawa", + "hokuto", + "ichikawamisato", + "kai", + "kofu", + "koshu", + "kosuge", + "minami-alps", + "minobu", + "nakamichi", + "nanbu", + "narusawa", + "nirasaki", + "nishikatsura", + "oshino", + "otsuki", + "showa", + "tabayama", + "tsuru", + "uenohara", + "yamanakako", + "yamanashi", + "city", + "co", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "ass", + "asso", + "com", + "coop", + "edu", + "gouv", + "gov", + "medecin", + "mil", + "nom", + "notaires", + "org", + "pharmaciens", + "prd", + "presse", + "tm", + "veterinaire", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "org", + "rep", + "tra", + "ac", + "blogspot", + "busan", + "chungbuk", + "chungnam", + "co", + "daegu", + "daejeon", + "es", + "gangwon", + "go", + "gwangju", + "gyeongbuk", + "gyeonggi", + "gyeongnam", + "hs", + "incheon", + "jeju", + "jeonbuk", + "jeonnam", + "kg", + "mil", + "ms", + "ne", + "or", + "pe", + "re", + "sc", + "seoul", + "ulsan", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "c", + "com", + "edu", + "gov", + "info", + "int", + "net", + "org", + "per", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "oy", + "blogspot", + "cyon", + "mypep", + "ac", + "assn", + "com", + "edu", + "gov", + "grp", + "hotel", + "int", + "ltd", + "net", + "ngo", + "org", + "sch", + "soc", + "web", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "blogspot", + "gov", + "blogspot", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "mil", + "net", + "org", + "com", + "edu", + "gov", + "id", + "med", + "net", + "org", + "plc", + "sch", + "ac", + "co", + "gov", + "net", + "org", + "press", + "router", + "asso", + "tm", + "blogspot", + "ac", + "brasilia", + "co", + "daplie", + "ddns", + "diskstation", + "dnsfor", + "dscloud", + "edu", + "gov", + "hopto", + "i234", + "its", + "loginto", + "myds", + "net", + "noip", + "org", + "priv", + "synology", + "webhop", + "co", + "com", + "edu", + "gov", + "mil", + "nom", + "org", + "prd", + "tm", + "blogspot", + "com", + "edu", + "gov", + "inf", + "name", + "net", + "org", + "com", + "edu", + "gouv", + "gov", + "net", + "org", + "presse", + "edu", + "gov", + "nyc", + "org", + "com", + "edu", + "gov", + "net", + "org", + "dscloud", + "blogspot", + "gov", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "gov", + "net", + "or", + "org", + "academy", + "agriculture", + "air", + "airguard", + "alabama", + "alaska", + "amber", + "ambulance", + "american", + "americana", + "americanantiques", + "americanart", + "amsterdam", + "and", + "annefrank", + "anthro", + "anthropology", + "antiques", + "aquarium", + "arboretum", + "archaeological", + "archaeology", + "architecture", + "art", + "artanddesign", + "artcenter", + "artdeco", + "arteducation", + "artgallery", + "arts", + "artsandcrafts", + "asmatart", + "assassination", + "assisi", + "association", + "astronomy", + "atlanta", + "austin", + "australia", + "automotive", + "aviation", + "axis", + "badajoz", + "baghdad", + "bahn", + "bale", + "baltimore", + "barcelona", + 
"baseball", + "basel", + "baths", + "bauern", + "beauxarts", + "beeldengeluid", + "bellevue", + "bergbau", + "berkeley", + "berlin", + "bern", + "bible", + "bilbao", + "bill", + "birdart", + "birthplace", + "bonn", + "boston", + "botanical", + "botanicalgarden", + "botanicgarden", + "botany", + "brandywinevalley", + "brasil", + "bristol", + "british", + "britishcolumbia", + "broadcast", + "brunel", + "brussel", + "brussels", + "bruxelles", + "building", + "burghof", + "bus", + "bushey", + "cadaques", + "california", + "cambridge", + "can", + "canada", + "capebreton", + "carrier", + "cartoonart", + "casadelamoneda", + "castle", + "castres", + "celtic", + "center", + "chattanooga", + "cheltenham", + "chesapeakebay", + "chicago", + "children", + "childrens", + "childrensgarden", + "chiropractic", + "chocolate", + "christiansburg", + "cincinnati", + "cinema", + "circus", + "civilisation", + "civilization", + "civilwar", + "clinton", + "clock", + "coal", + "coastaldefence", + "cody", + "coldwar", + "collection", + "colonialwilliamsburg", + "coloradoplateau", + "columbia", + "columbus", + "communication", + "communications", + "community", + "computer", + "computerhistory", + "contemporary", + "contemporaryart", + "convent", + "copenhagen", + "corporation", + "corvette", + "costume", + "countryestate", + "county", + "crafts", + "cranbrook", + "creation", + "cultural", + "culturalcenter", + "culture", + "cyber", + "cymru", + "dali", + "dallas", + "database", + "ddr", + "decorativearts", + "delaware", + "delmenhorst", + "denmark", + "depot", + "design", + "detroit", + "dinosaur", + "discovery", + "dolls", + "donostia", + "durham", + "eastafrica", + "eastcoast", + "education", + "educational", + "egyptian", + "eisenbahn", + "elburg", + "elvendrell", + "embroidery", + "encyclopedic", + "england", + "entomology", + "environment", + "environmentalconservation", + "epilepsy", + "essex", + "estate", + "ethnology", + "exeter", + "exhibition", + "family", + "farm", + "farmequipment", + "farmers", + "farmstead", + "field", + "figueres", + "filatelia", + "film", + "fineart", + "finearts", + "finland", + "flanders", + "florida", + "force", + "fortmissoula", + "fortworth", + "foundation", + "francaise", + "frankfurt", + "franziskaner", + "freemasonry", + "freiburg", + "fribourg", + "frog", + "fundacio", + "furniture", + "gallery", + "garden", + "gateway", + "geelvinck", + "gemological", + "geology", + "georgia", + "giessen", + "glas", + "glass", + "gorge", + "grandrapids", + "graz", + "guernsey", + "halloffame", + "hamburg", + "handson", + "harvestcelebration", + "hawaii", + "health", + "heimatunduhren", + "hellas", + "helsinki", + "hembygdsforbund", + "heritage", + "histoire", + "historical", + "historicalsociety", + "historichouses", + "historisch", + "historisches", + "history", + "historyofscience", + "horology", + "house", + "humanities", + "illustration", + "imageandsound", + "indian", + "indiana", + "indianapolis", + "indianmarket", + "intelligence", + "interactive", + "iraq", + "iron", + "isleofman", + "jamison", + "jefferson", + "jerusalem", + "jewelry", + "jewish", + "jewishart", + "jfk", + "journalism", + "judaica", + "judygarland", + "juedisches", + "juif", + "karate", + "karikatur", + "kids", + "koebenhavn", + "koeln", + "kunst", + "kunstsammlung", + "kunstunddesign", + "labor", + "labour", + "lajolla", + "lancashire", + "landes", + "lans", + "larsson", + "lewismiller", + "lincoln", + "linz", + "living", + "livinghistory", + "localhistory", + "london", + "losangeles", + "louvre", + "loyalist", + 
"lucerne", + "luxembourg", + "luzern", + "mad", + "madrid", + "mallorca", + "manchester", + "mansion", + "mansions", + "manx", + "marburg", + "maritime", + "maritimo", + "maryland", + "marylhurst", + "media", + "medical", + "medizinhistorisches", + "meeres", + "memorial", + "mesaverde", + "michigan", + "midatlantic", + "military", + "mill", + "miners", + "mining", + "minnesota", + "missile", + "missoula", + "modern", + "moma", + "money", + "monmouth", + "monticello", + "montreal", + "moscow", + "motorcycle", + "muenchen", + "muenster", + "mulhouse", + "muncie", + "museet", + "museumcenter", + "museumvereniging", + "music", + "national", + "nationalfirearms", + "nationalheritage", + "nativeamerican", + "naturalhistory", + "naturalhistorymuseum", + "naturalsciences", + "nature", + "naturhistorisches", + "natuurwetenschappen", + "naumburg", + "naval", + "nebraska", + "neues", + "newhampshire", + "newjersey", + "newmexico", + "newport", + "newspaper", + "newyork", + "niepce", + "norfolk", + "north", + "nrw", + "nuernberg", + "nuremberg", + "nyc", + "nyny", + "oceanographic", + "oceanographique", + "omaha", + "online", + "ontario", + "openair", + "oregon", + "oregontrail", + "otago", + "oxford", + "pacific", + "paderborn", + "palace", + "paleo", + "palmsprings", + "panama", + "paris", + "pasadena", + "pharmacy", + "philadelphia", + "philadelphiaarea", + "philately", + "phoenix", + "photography", + "pilots", + "pittsburgh", + "planetarium", + "plantation", + "plants", + "plaza", + "portal", + "portland", + "portlligat", + "posts-and-telecommunications", + "preservation", + "presidio", + "press", + "project", + "public", + "pubol", + "quebec", + "railroad", + "railway", + "research", + "resistance", + "riodejaneiro", + "rochester", + "rockart", + "roma", + "russia", + "saintlouis", + "salem", + "salvadordali", + "salzburg", + "sandiego", + "sanfrancisco", + "santabarbara", + "santacruz", + "santafe", + "saskatchewan", + "satx", + "savannahga", + "schlesisches", + "schoenbrunn", + "schokoladen", + "school", + "schweiz", + "science", + "science-fiction", + "scienceandhistory", + "scienceandindustry", + "sciencecenter", + "sciencecenters", + "sciencehistory", + "sciences", + "sciencesnaturelles", + "scotland", + "seaport", + "settlement", + "settlers", + "shell", + "sherbrooke", + "sibenik", + "silk", + "ski", + "skole", + "society", + "sologne", + "soundandvision", + "southcarolina", + "southwest", + "space", + "spy", + "square", + "stadt", + "stalbans", + "starnberg", + "state", + "stateofdelaware", + "station", + "steam", + "steiermark", + "stjohn", + "stockholm", + "stpetersburg", + "stuttgart", + "suisse", + "surgeonshall", + "surrey", + "svizzera", + "sweden", + "sydney", + "tank", + "tcm", + "technology", + "telekommunikation", + "television", + "texas", + "textile", + "theater", + "time", + "timekeeping", + "topology", + "torino", + "touch", + "town", + "transport", + "tree", + "trolley", + "trust", + "trustee", + "uhren", + "ulm", + "undersea", + "university", + "usa", + "usantiques", + "usarts", + "uscountryestate", + "usculture", + "usdecorativearts", + "usgarden", + "ushistory", + "ushuaia", + "uslivinghistory", + "utah", + "uvic", + "valley", + "vantaa", + "versailles", + "viking", + "village", + "virginia", + "virtual", + "virtuel", + "vlaanderen", + "volkenkunde", + "wales", + "wallonie", + "war", + "washingtondc", + "watch-and-clock", + "watchandclock", + "western", + "westfalen", + "whaling", + "wildlife", + "williamsburg", + "windmill", + "workshop", + "xn--9dbhblg6di", + 
"xn--comunicaes-v6a2o", + "xn--correios-e-telecomunicaes-ghc29a", + "xn--h1aegh", + "xn--lns-qla", + "york", + "yorkshire", + "yosemite", + "youth", + "zoological", + "zoology", + "aero", + "biz", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "mil", + "museum", + "name", + "net", + "org", + "pro", + "ac", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "int", + "museum", + "net", + "org", + "blogspot", + "com", + "edu", + "gob", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "teledata", + "ca", + "cc", + "co", + "com", + "dr", + "in", + "info", + "mobi", + "mx", + "name", + "or", + "org", + "pro", + "school", + "tv", + "us", + "ws", + "her", + "his", + "forgot", + "forgot", + "asso", + "at-band-camp", + "azure-mobile", + "azurewebsites", + "blogdns", + "bounceme", + "broke-it", + "buyshouses", + "cdn77", + "cdn77-ssl", + "cloudapp", + "cloudfront", + "cloudfunctions", + "cryptonomic", + "ddns", + "dnsalias", + "dnsdojo", + "does-it", + "dontexist", + "dsmynas", + "dynalias", + "dynathome", + "dynv6", + "eating-organic", + "endofinternet", + "familyds", + "fastly", + "from-az", + "from-co", + "from-la", + "from-ny", + "gb", + "gets-it", + "ham-radio-op", + "homeftp", + "homeip", + "homelinux", + "homeunix", + "hu", + "in", + "in-the-band", + "is-a-chef", + "is-a-geek", + "isa-geek", + "jp", + "kicks-ass", + "mydissent", + "myeffect", + "myfritz", + "mymediapc", + "mypsx", + "mysecuritycamera", + "nhlfan", + "no-ip", + "office-on-the", + "pgafan", + "podzone", + "privatizehealthinsurance", + "rackmaze", + "redirectme", + "scrapper-site", + "se", + "selfip", + "sells-it", + "servebbs", + "serveblog", + "serveftp", + "serveminecraft", + "sytes", + "thruhere", + "uk", + "webhop", + "za", + "r", + "prod", + "ssl", + "a", + "global", + "a", + "b", + "global", + "alces", + "arts", + "com", + "firm", + "info", + "net", + "other", + "per", + "rec", + "store", + "web", + "com", + "edu", + "gov", + "i", + "mil", + "mobi", + "name", + "net", + "org", + "sch", + "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gob", + "in", + "info", + "int", + "mil", + "net", + "nom", + "org", + "web", + "blogspot", + "bv", + "co", + "virtueeldomein", + "aa", + "aarborte", + "aejrie", + "afjord", + "agdenes", + "ah", + "akershus", + "aknoluokta", + "akrehamn", + "al", + "alaheadju", + "alesund", + "algard", + "alstahaug", + "alta", + "alvdal", + "amli", + "amot", + "andasuolo", + "andebu", + "andoy", + "ardal", + "aremark", + "arendal", + "arna", + "aseral", + "asker", + "askim", + "askoy", + "askvoll", + "asnes", + "audnedaln", + "aukra", + "aure", + "aurland", + "aurskog-holand", + "austevoll", + "austrheim", + "averoy", + "badaddja", + "bahcavuotna", + "bahccavuotna", + "baidar", + "bajddar", + "balat", + "balestrand", + "ballangen", + "balsfjord", + "bamble", + "bardu", + "barum", + "batsfjord", + "bearalvahki", + "beardu", + "beiarn", + "berg", + "bergen", + "berlevag", + "bievat", + "bindal", + "birkenes", + "bjarkoy", + "bjerkreim", + "bjugn", + "blogspot", + "bodo", + "bokn", + "bomlo", + "bremanger", + "bronnoy", + "bronnoysund", + "brumunddal", + "bryne", + "bu", + "budejju", + "buskerud", + "bygland", + "bykle", + "cahcesuolo", + "co", + "davvenjarga", + "davvesiida", + "deatnu", + "dep", + "dielddanuorri", + "divtasvuodna", + "divttasvuotna", + "donna", + "dovre", + "drammen", + "drangedal", + "drobak", + "dyroy", + "egersund", + "eid", + "eidfjord", + "eidsberg", + "eidskog", + "eidsvoll", + "eigersund", + "elverum", + 
"enebakk", + "engerdal", + "etne", + "etnedal", + "evenassi", + "evenes", + "evje-og-hornnes", + "farsund", + "fauske", + "fedje", + "fet", + "fetsund", + "fhs", + "finnoy", + "fitjar", + "fjaler", + "fjell", + "fla", + "flakstad", + "flatanger", + "flekkefjord", + "flesberg", + "flora", + "floro", + "fm", + "folkebibl", + "folldal", + "forde", + "forsand", + "fosnes", + "frana", + "fredrikstad", + "frei", + "frogn", + "froland", + "frosta", + "froya", + "fuoisku", + "fuossko", + "fusa", + "fylkesbibl", + "fyresdal", + "gaivuotna", + "galsa", + "gamvik", + "gangaviika", + "gaular", + "gausdal", + "giehtavuoatna", + "gildeskal", + "giske", + "gjemnes", + "gjerdrum", + "gjerstad", + "gjesdal", + "gjovik", + "gloppen", + "gol", + "gran", + "grane", + "granvin", + "gratangen", + "grimstad", + "grong", + "grue", + "gulen", + "guovdageaidnu", + "ha", + "habmer", + "hadsel", + "hagebostad", + "halden", + "halsa", + "hamar", + "hamaroy", + "hammarfeasta", + "hammerfest", + "hapmir", + "haram", + "hareid", + "harstad", + "hasvik", + "hattfjelldal", + "haugesund", + "hedmark", + "hemne", + "hemnes", + "hemsedal", + "herad", + "hitra", + "hjartdal", + "hjelmeland", + "hl", + "hm", + "hobol", + "hof", + "hokksund", + "hol", + "hole", + "holmestrand", + "holtalen", + "honefoss", + "hordaland", + "hornindal", + "horten", + "hoyanger", + "hoylandet", + "hurdal", + "hurum", + "hvaler", + "hyllestad", + "ibestad", + "idrett", + "inderoy", + "iveland", + "ivgu", + "jan-mayen", + "jessheim", + "jevnaker", + "jolster", + "jondal", + "jorpeland", + "kafjord", + "karasjohka", + "karasjok", + "karlsoy", + "karmoy", + "kautokeino", + "kirkenes", + "klabu", + "klepp", + "kommune", + "kongsberg", + "kongsvinger", + "kopervik", + "kraanghke", + "kragero", + "kristiansand", + "kristiansund", + "krodsherad", + "krokstadelva", + "kvafjord", + "kvalsund", + "kvam", + "kvanangen", + "kvinesdal", + "kvinnherad", + "kviteseid", + "kvitsoy", + "laakesvuemie", + "lahppi", + "langevag", + "lardal", + "larvik", + "lavagis", + "lavangen", + "leangaviika", + "lebesby", + "leikanger", + "leirfjord", + "leirvik", + "leka", + "leksvik", + "lenvik", + "lerdal", + "lesja", + "levanger", + "lier", + "lierne", + "lillehammer", + "lillesand", + "lindas", + "lindesnes", + "loabat", + "lodingen", + "lom", + "loppa", + "lorenskog", + "loten", + "lund", + "lunner", + "luroy", + "luster", + "lyngdal", + "lyngen", + "malatvuopmi", + "malselv", + "malvik", + "mandal", + "marker", + "marnardal", + "masfjorden", + "masoy", + "matta-varjjat", + "meland", + "meldal", + "melhus", + "meloy", + "meraker", + "midsund", + "midtre-gauldal", + "mil", + "mjondalen", + "mo-i-rana", + "moareke", + "modalen", + "modum", + "molde", + "more-og-romsdal", + "mosjoen", + "moskenes", + "moss", + "mosvik", + "mr", + "muosat", + "museum", + "naamesjevuemie", + "namdalseid", + "namsos", + "namsskogan", + "nannestad", + "naroy", + "narviika", + "narvik", + "naustdal", + "navuotna", + "nedre-eiker", + "nesna", + "nesodden", + "nesoddtangen", + "nesseby", + "nesset", + "nissedal", + "nittedal", + "nl", + "nord-aurdal", + "nord-fron", + "nord-odal", + "norddal", + "nordkapp", + "nordland", + "nordre-land", + "nordreisa", + "nore-og-uvdal", + "notodden", + "notteroy", + "nt", + "odda", + "of", + "oksnes", + "ol", + "omasvuotna", + "oppdal", + "oppegard", + "orkanger", + "orkdal", + "orland", + "orskog", + "orsta", + "osen", + "oslo", + "osoyro", + "osteroy", + "ostfold", + "ostre-toten", + "overhalla", + "ovre-eiker", + "oyer", + "oygarden", + "oystre-slidre", + 
"porsanger", + "porsangu", + "porsgrunn", + "priv", + "rade", + "radoy", + "rahkkeravju", + "raholt", + "raisa", + "rakkestad", + "ralingen", + "rana", + "randaberg", + "rauma", + "rendalen", + "rennebu", + "rennesoy", + "rindal", + "ringebu", + "ringerike", + "ringsaker", + "risor", + "rissa", + "rl", + "roan", + "rodoy", + "rollag", + "romsa", + "romskog", + "roros", + "rost", + "royken", + "royrvik", + "ruovat", + "rygge", + "salangen", + "salat", + "saltdal", + "samnanger", + "sandefjord", + "sandnes", + "sandnessjoen", + "sandoy", + "sarpsborg", + "sauda", + "sauherad", + "sel", + "selbu", + "selje", + "seljord", + "sf", + "siellak", + "sigdal", + "siljan", + "sirdal", + "skanit", + "skanland", + "skaun", + "skedsmo", + "skedsmokorset", + "ski", + "skien", + "skierva", + "skiptvet", + "skjak", + "skjervoy", + "skodje", + "slattum", + "smola", + "snaase", + "snasa", + "snillfjord", + "snoasa", + "sogndal", + "sogne", + "sokndal", + "sola", + "solund", + "somna", + "sondre-land", + "songdalen", + "sor-aurdal", + "sor-fron", + "sor-odal", + "sor-varanger", + "sorfold", + "sorreisa", + "sortland", + "sorum", + "spjelkavik", + "spydeberg", + "st", + "stange", + "stat", + "stathelle", + "stavanger", + "stavern", + "steigen", + "steinkjer", + "stjordal", + "stjordalshalsen", + "stokke", + "stor-elvdal", + "stord", + "stordal", + "storfjord", + "strand", + "stranda", + "stryn", + "sula", + "suldal", + "sund", + "sunndal", + "surnadal", + "svalbard", + "sveio", + "svelvik", + "sykkylven", + "tana", + "tananger", + "telemark", + "time", + "tingvoll", + "tinn", + "tjeldsund", + "tjome", + "tm", + "tokke", + "tolga", + "tonsberg", + "torsken", + "tr", + "trana", + "tranby", + "tranoy", + "troandin", + "trogstad", + "tromsa", + "tromso", + "trondheim", + "trysil", + "tvedestrand", + "tydal", + "tynset", + "tysfjord", + "tysnes", + "tysvar", + "ullensaker", + "ullensvang", + "ulvik", + "unjarga", + "utsira", + "va", + "vaapste", + "vadso", + "vaga", + "vagan", + "vagsoy", + "vaksdal", + "valle", + "vang", + "vanylven", + "vardo", + "varggat", + "varoy", + "vefsn", + "vega", + "vegarshei", + "vennesla", + "verdal", + "verran", + "vestby", + "vestfold", + "vestnes", + "vestre-slidre", + "vestre-toten", + "vestvagoy", + "vevelstad", + "vf", + "vgs", + "vik", + "vikna", + "vindafjord", + "voagat", + "volda", + "voss", + "vossevangen", + "xn--andy-ira", + "xn--asky-ira", + "xn--aurskog-hland-jnb", + "xn--avery-yua", + "xn--bdddj-mrabd", + "xn--bearalvhki-y4a", + "xn--berlevg-jxa", + "xn--bhcavuotna-s4a", + "xn--bhccavuotna-k7a", + "xn--bidr-5nac", + "xn--bievt-0qa", + "xn--bjarky-fya", + "xn--bjddar-pta", + "xn--blt-elab", + "xn--bmlo-gra", + "xn--bod-2na", + "xn--brnny-wuac", + "xn--brnnysund-m8ac", + "xn--brum-voa", + "xn--btsfjord-9za", + "xn--davvenjrga-y4a", + "xn--dnna-gra", + "xn--drbak-wua", + "xn--dyry-ira", + "xn--eveni-0qa01ga", + "xn--finny-yua", + "xn--fjord-lra", + "xn--fl-zia", + "xn--flor-jra", + "xn--frde-gra", + "xn--frna-woa", + "xn--frya-hra", + "xn--ggaviika-8ya47h", + "xn--gildeskl-g0a", + "xn--givuotna-8ya", + "xn--gjvik-wua", + "xn--gls-elac", + "xn--h-2fa", + "xn--hbmer-xqa", + "xn--hcesuolo-7ya35b", + "xn--hgebostad-g3a", + "xn--hmmrfeasta-s4ac", + "xn--hnefoss-q1a", + "xn--hobl-ira", + "xn--holtlen-hxa", + "xn--hpmir-xqa", + "xn--hyanger-q1a", + "xn--hylandet-54a", + "xn--indery-fya", + "xn--jlster-bya", + "xn--jrpeland-54a", + "xn--karmy-yua", + "xn--kfjord-iua", + "xn--klbu-woa", + "xn--koluokta-7ya57h", + "xn--krager-gya", + "xn--kranghke-b0a", + "xn--krdsherad-m8a", + 
"xn--krehamn-dxa", + "xn--krjohka-hwab49j", + "xn--ksnes-uua", + "xn--kvfjord-nxa", + "xn--kvitsy-fya", + "xn--kvnangen-k0a", + "xn--l-1fa", + "xn--laheadju-7ya", + "xn--langevg-jxa", + "xn--ldingen-q1a", + "xn--leagaviika-52b", + "xn--lesund-hua", + "xn--lgrd-poac", + "xn--lhppi-xqa", + "xn--linds-pra", + "xn--loabt-0qa", + "xn--lrdal-sra", + "xn--lrenskog-54a", + "xn--lt-liac", + "xn--lten-gra", + "xn--lury-ira", + "xn--mely-ira", + "xn--merker-kua", + "xn--mjndalen-64a", + "xn--mlatvuopmi-s4a", + "xn--mli-tla", + "xn--mlselv-iua", + "xn--moreke-jua", + "xn--mosjen-eya", + "xn--mot-tla", + "xn--mre-og-romsdal-qqb", + "xn--msy-ula0h", + "xn--mtta-vrjjat-k7af", + "xn--muost-0qa", + "xn--nmesjevuemie-tcba", + "xn--nry-yla5g", + "xn--nttery-byae", + "xn--nvuotna-hwa", + "xn--oppegrd-ixa", + "xn--ostery-fya", + "xn--osyro-wua", + "xn--porsgu-sta26f", + "xn--rady-ira", + "xn--rdal-poa", + "xn--rde-ula", + "xn--rdy-0nab", + "xn--rennesy-v1a", + "xn--rhkkervju-01af", + "xn--rholt-mra", + "xn--risa-5na", + "xn--risr-ira", + "xn--rland-uua", + "xn--rlingen-mxa", + "xn--rmskog-bya", + "xn--rros-gra", + "xn--rskog-uua", + "xn--rst-0na", + "xn--rsta-fra", + "xn--ryken-vua", + "xn--ryrvik-bya", + "xn--s-1fa", + "xn--sandnessjen-ogb", + "xn--sandy-yua", + "xn--seral-lra", + "xn--sgne-gra", + "xn--skierv-uta", + "xn--skjervy-v1a", + "xn--skjk-soa", + "xn--sknit-yqa", + "xn--sknland-fxa", + "xn--slat-5na", + "xn--slt-elab", + "xn--smla-hra", + "xn--smna-gra", + "xn--snase-nra", + "xn--sndre-land-0cb", + "xn--snes-poa", + "xn--snsa-roa", + "xn--sr-aurdal-l8a", + "xn--sr-fron-q1a", + "xn--sr-odal-q1a", + "xn--sr-varanger-ggb", + "xn--srfold-bya", + "xn--srreisa-q1a", + "xn--srum-gra", + "xn--stfold-9xa", + "xn--stjrdal-s1a", + "xn--stjrdalshalsen-sqb", + "xn--stre-toten-zcb", + "xn--tjme-hra", + "xn--tnsberg-q1a", + "xn--trany-yua", + "xn--trgstad-r1a", + "xn--trna-woa", + "xn--troms-zua", + "xn--tysvr-vra", + "xn--unjrga-rta", + "xn--vads-jra", + "xn--vard-jra", + "xn--vegrshei-c0a", + "xn--vestvgy-ixa6o", + "xn--vg-yiab", + "xn--vgan-qoa", + "xn--vgsy-qoa0j", + "xn--vre-eiker-k8a", + "xn--vrggt-xqad", + "xn--vry-yla5g", + "xn--yer-zna", + "xn--ygarden-p1a", + "xn--ystre-slidre-ujb", + "gs", + "gs", + "nes", + "gs", + "nes", + "gs", + "os", + "valer", + "xn--vler-qoa", + "gs", + "gs", + "os", + "gs", + "heroy", + "sande", + "gs", + "gs", + "bo", + "heroy", + "xn--b-5ga", + "xn--hery-ira", + "gs", + "gs", + "gs", + "gs", + "valer", + "gs", + "gs", + "gs", + "gs", + "bo", + "xn--b-5ga", + "gs", + "gs", + "gs", + "sande", + "gs", + "sande", + "xn--hery-ira", + "xn--vler-qoa", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "merseine", + "mine", + "shacknet", + "ac", + "co", + "cri", + "geek", + "gen", + "govt", + "health", + "iwi", + "kiwi", + "maori", + "mil", + "net", + "org", + "parliament", + "school", + "xn--mori-qsa", + "blogspot", + "co", + "com", + "edu", + "gov", + "med", + "museum", + "net", + "org", + "pro", + "ae", + "blogdns", + "blogsite", + "bmoattachments", + "boldlygoingnowhere", + "cable-modem", + "cdn77", + "cdn77-secure", + "certmgr", + "collegefan", + "couchpotatofries", + "dnsalias", + "dnsdojo", + "doesntexist", + "dontexist", + "doomdns", + "dsmynas", + "duckdns", + "dvrdns", + "dynalias", + "dyndns", + "endofinternet", + "endoftheinternet", + "eu", + "familyds", + "from-me", + "game-host", + "gotdns", + "hepforge", + "hk", + "hobby-site", + "homedns", + "homeftp", + "homelinux", + "homeunix", + "hopto", + "is-a-bruinsfan", + "is-a-candidate", + "is-a-celticsfan", + 
"is-a-chef", + "is-a-geek", + "is-a-knight", + "is-a-linux-user", + "is-a-patsfan", + "is-a-soxfan", + "is-found", + "is-lost", + "is-saved", + "is-very-bad", + "is-very-evil", + "is-very-good", + "is-very-nice", + "is-very-sweet", + "isa-geek", + "kicks-ass", + "misconfused", + "mlbfan", + "myftp", + "mysecuritycamera", + "nflfan", + "no-ip", + "pimienta", + "podzone", + "poivron", + "potager", + "read-books", + "readmyblog", + "selfip", + "sellsyourhome", + "servebbs", + "serveftp", + "servegame", + "stuff-4-sale", + "sweetpepper", + "tunk", + "tuxfamily", + "ufcfan", + "us", + "webhop", + "za", + "zapto", + "c", + "rsc", + "origin", + "ssl", + "go", + "home", + "al", + "asso", + "at", + "au", + "be", + "bg", + "ca", + "cd", + "ch", + "cn", + "cy", + "cz", + "de", + "dk", + "edu", + "ee", + "es", + "fi", + "fr", + "gr", + "hr", + "hu", + "ie", + "il", + "in", + "int", + "is", + "it", + "jp", + "kr", + "lt", + "lu", + "lv", + "mc", + "me", + "mk", + "mt", + "my", + "net", + "ng", + "nl", + "no", + "nz", + "paris", + "pl", + "pt", + "q-a", + "ro", + "ru", + "se", + "si", + "sk", + "tr", + "uk", + "us", + "nerdpol", + "abo", + "ac", + "com", + "edu", + "gob", + "ing", + "med", + "net", + "nom", + "org", + "sld", + "blogspot", + "com", + "edu", + "gob", + "mil", + "net", + "nom", + "org", + "com", + "edu", + "org", + "com", + "edu", + "gov", + "i", + "mil", + "net", + "ngo", + "org", + "biz", + "com", + "edu", + "fam", + "gob", + "gok", + "gon", + "gop", + "gos", + "gov", + "info", + "net", + "org", + "web", + "agro", + "aid", + "art", + "atm", + "augustow", + "auto", + "babia-gora", + "bedzin", + "beep", + "beskidy", + "bialowieza", + "bialystok", + "bielawa", + "bieszczady", + "biz", + "boleslawiec", + "bydgoszcz", + "bytom", + "cieszyn", + "co", + "com", + "czeladz", + "czest", + "dlugoleka", + "edu", + "elblag", + "elk", + "gda", + "gdansk", + "gdynia", + "gliwice", + "glogow", + "gmina", + "gniezno", + "gorlice", + "gov", + "grajewo", + "gsm", + "ilawa", + "info", + "jaworzno", + "jelenia-gora", + "jgora", + "kalisz", + "karpacz", + "kartuzy", + "kaszuby", + "katowice", + "kazimierz-dolny", + "kepno", + "ketrzyn", + "klodzko", + "kobierzyce", + "kolobrzeg", + "konin", + "konskowola", + "krakow", + "kutno", + "lapy", + "lebork", + "legnica", + "lezajsk", + "limanowa", + "lomza", + "lowicz", + "lubin", + "lukow", + "mail", + "malbork", + "malopolska", + "mazowsze", + "mazury", + "med", + "media", + "miasta", + "mielec", + "mielno", + "mil", + "mragowo", + "naklo", + "net", + "nieruchomosci", + "nom", + "nowaruda", + "nysa", + "olawa", + "olecko", + "olkusz", + "olsztyn", + "opoczno", + "opole", + "org", + "ostroda", + "ostroleka", + "ostrowiec", + "ostrowwlkp", + "pc", + "pila", + "pisz", + "podhale", + "podlasie", + "polkowice", + "pomorskie", + "pomorze", + "powiat", + "poznan", + "priv", + "prochowice", + "pruszkow", + "przeworsk", + "pulawy", + "radom", + "rawa-maz", + "realestate", + "rel", + "rybnik", + "rzeszow", + "sanok", + "sejny", + "sex", + "shop", + "sklep", + "skoczow", + "slask", + "slupsk", + "sopot", + "sos", + "sosnowiec", + "stalowa-wola", + "starachowice", + "stargard", + "suwalki", + "swidnica", + "swiebodzin", + "swinoujscie", + "szczecin", + "szczytno", + "szkola", + "targi", + "tarnobrzeg", + "tgory", + "tm", + "tourism", + "travel", + "turek", + "turystyka", + "tychy", + "ustka", + "walbrzych", + "warmia", + "warszawa", + "waw", + "wegrow", + "wielun", + "wlocl", + "wloclawek", + "wodzislaw", + "wolomin", + "wroc", + "wroclaw", + "zachpomor", + "zagan", + 
"zakopane", + "zarow", + "zgora", + "zgorzelec", + "ap", + "griw", + "ic", + "is", + "kmpsp", + "konsulat", + "kppsp", + "kwp", + "kwpsp", + "mup", + "mw", + "oirm", + "oum", + "pa", + "pinb", + "piw", + "po", + "psp", + "psse", + "pup", + "rzgw", + "sa", + "sdn", + "sko", + "so", + "sr", + "starostwo", + "ug", + "ugim", + "um", + "umig", + "upow", + "uppo", + "us", + "uw", + "uzs", + "wif", + "wiih", + "winb", + "wios", + "witd", + "wiw", + "wsa", + "wskr", + "wuoz", + "wzmiuw", + "zp", + "co", + "edu", + "gov", + "net", + "org", + "ac", + "biz", + "com", + "edu", + "est", + "gov", + "info", + "isla", + "name", + "net", + "org", + "pro", + "prof", + "aaa", + "aca", + "acct", + "avocat", + "bar", + "cpa", + "eng", + "jur", + "law", + "med", + "recht", + "com", + "edu", + "gov", + "net", + "org", + "plo", + "sec", + "blogspot", + "com", + "edu", + "gov", + "int", + "net", + "nome", + "org", + "publ", + "belau", + "co", + "ed", + "go", + "ne", + "or", + "com", + "coop", + "edu", + "gov", + "mil", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "sch", + "asso", + "blogspot", + "com", + "nom", + "arts", + "blogspot", + "com", + "firm", + "info", + "nom", + "nt", + "org", + "rec", + "store", + "tm", + "www", + "ac", + "blogspot", + "co", + "edu", + "gov", + "in", + "org", + "ac", + "adygeya", + "altai", + "amur", + "amursk", + "arkhangelsk", + "astrakhan", + "baikal", + "bashkiria", + "belgorod", + "bir", + "blogspot", + "bryansk", + "buryatia", + "cbg", + "chel", + "chelyabinsk", + "chita", + "chukotka", + "chuvashia", + "cmw", + "com", + "dagestan", + "dudinka", + "e-burg", + "edu", + "fareast", + "gov", + "grozny", + "int", + "irkutsk", + "ivanovo", + "izhevsk", + "jamal", + "jar", + "joshkar-ola", + "k-uralsk", + "kalmykia", + "kaluga", + "kamchatka", + "karelia", + "kazan", + "kchr", + "kemerovo", + "khabarovsk", + "khakassia", + "khv", + "kirov", + "kms", + "koenig", + "komi", + "kostroma", + "krasnoyarsk", + "kuban", + "kurgan", + "kursk", + "kustanai", + "kuzbass", + "lipetsk", + "magadan", + "mari", + "mari-el", + "marine", + "mil", + "mordovia", + "msk", + "murmansk", + "mytis", + "nakhodka", + "nalchik", + "net", + "nkz", + "nnov", + "norilsk", + "nov", + "novosibirsk", + "nsk", + "omsk", + "orenburg", + "org", + "oryol", + "oskol", + "palana", + "penza", + "perm", + "pp", + "ptz", + "pyatigorsk", + "rnd", + "rubtsovsk", + "ryazan", + "sakhalin", + "samara", + "saratov", + "simbirsk", + "smolensk", + "snz", + "spb", + "stavropol", + "stv", + "surgut", + "syzran", + "tambov", + "tatarstan", + "test", + "tom", + "tomsk", + "tsaritsyn", + "tsk", + "tula", + "tuva", + "tver", + "tyumen", + "udm", + "udmurtia", + "ulan-ude", + "vdonsk", + "vladikavkaz", + "vladimir", + "vladivostok", + "volgograd", + "vologda", + "voronezh", + "vrn", + "vyatka", + "yakutia", + "yamal", + "yaroslavl", + "yekaterinburg", + "yuzhno-sakhalinsk", + "zgrad", + "ac", + "co", + "com", + "edu", + "gouv", + "gov", + "int", + "mil", + "net", + "com", + "edu", + "gov", + "med", + "net", + "org", + "pub", + "sch", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "info", + "med", + "net", + "org", + "tv", + "a", + "ac", + "b", + "bd", + "blogspot", + "brand", + "c", + "com", + "d", + "e", + "f", + "fh", + "fhsk", + "fhv", + "g", + "h", + "i", + "k", + "komforb", + "kommunalforbund", + "komvux", + "l", + "lanbib", + "m", + "n", + "naturbruksgymn", + "o", + "org", + "p", + "parti", + "pp", + "press", + 
"r", + "s", + "t", + "tm", + "u", + "w", + "x", + "y", + "z", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "per", + "com", + "gov", + "hashbang", + "mil", + "net", + "org", + "platform", + "blogspot", + "cyon", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "art", + "blogspot", + "com", + "edu", + "gouv", + "org", + "perso", + "univ", + "com", + "net", + "org", + "co", + "com", + "consulado", + "edu", + "embaixada", + "gov", + "mil", + "net", + "org", + "principe", + "saotome", + "store", + "adygeya", + "arkhangelsk", + "balashov", + "bashkiria", + "bryansk", + "dagestan", + "grozny", + "ivanovo", + "kalmykia", + "kaluga", + "karelia", + "khakassia", + "krasnodar", + "kurgan", + "lenug", + "mordovia", + "msk", + "murmansk", + "nalchik", + "nov", + "obninsk", + "penza", + "pokrovsk", + "sochi", + "spb", + "togliatti", + "troitsk", + "tula", + "tuva", + "vladikavkaz", + "vladimir", + "vologda", + "com", + "edu", + "gob", + "org", + "red", + "gov", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "ac", + "co", + "org", + "blogspot", + "ac", + "co", + "go", + "in", + "mi", + "net", + "or", + "ac", + "biz", + "co", + "com", + "edu", + "go", + "gov", + "int", + "mil", + "name", + "net", + "nic", + "org", + "test", + "web", + "gov", + "co", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "agrinet", + "com", + "defense", + "edunet", + "ens", + "fin", + "gov", + "ind", + "info", + "intl", + "mincom", + "nat", + "net", + "org", + "perso", + "rnrt", + "rns", + "rnu", + "tourism", + "turen", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "av", + "bbs", + "bel", + "biz", + "com", + "dr", + "edu", + "gen", + "gov", + "info", + "k12", + "kep", + "mil", + "name", + "nc", + "net", + "org", + "pol", + "tel", + "tv", + "web", + "blogspot", + "gov", + "aero", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "jobs", + "mobi", + "museum", + "name", + "net", + "org", + "pro", + "travel", + "better-than", + "dyndns", + "on-the-web", + "worse-than", + "blogspot", + "club", + "com", + "ebiz", + "edu", + "game", + "gov", + "idv", + "mil", + "net", + "org", + "xn--czrw28b", + "xn--uc0atv", + "xn--zf0ao64a", + "ac", + "co", + "go", + "hotel", + "info", + "me", + "mil", + "mobi", + "ne", + "or", + "sc", + "tv", + "biz", + "cherkassy", + "cherkasy", + "chernigov", + "chernihiv", + "chernivtsi", + "chernovtsy", + "ck", + "cn", + "co", + "com", + "cr", + "crimea", + "cv", + "dn", + "dnepropetrovsk", + "dnipropetrovsk", + "dominic", + "donetsk", + "dp", + "edu", + "gov", + "if", + "in", + "ivano-frankivsk", + "kh", + "kharkiv", + "kharkov", + "kherson", + "khmelnitskiy", + "khmelnytskyi", + "kiev", + "kirovograd", + "km", + "kr", + "krym", + "ks", + "kv", + "kyiv", + "lg", + "lt", + "lugansk", + "lutsk", + "lv", + "lviv", + "mk", + "mykolaiv", + "net", + "nikolaev", + "od", + "odesa", + "odessa", + "org", + "pl", + "poltava", + "pp", + "rivne", + "rovno", + "rv", + "sb", + "sebastopol", + "sevastopol", + "sm", + "sumy", + "te", + "ternopil", + "uz", + "uzhgorod", + "vinnica", + "vinnytsia", + "vn", + "volyn", + "yalta", + "zaporizhzhe", + "zaporizhzhia", + "zhitomir", + "zhytomyr", + "zp", + "zt", + "ac", + "blogspot", + "co", + "com", + "go", + "ne", + "or", + "org", + "sc", + "ac", + "co", + "gov", + "ltd", + "me", + "net", + "nhs", + "org", + "plc", + "police", + "sch", + "blogspot", + "no-ip", + "service", + "ak", + "al", + "ar", + "as", + "az", + "ca", + "co", + "ct", + "dc", + "de", + "dni", + "fed", + "fl", + "ga", + "golffan", 
+ "gu", + "hi", + "ia", + "id", + "il", + "in", + "is-by", + "isa", + "kids", + "ks", + "ky", + "la", + "land-4-sale", + "ma", + "md", + "me", + "mi", + "mn", + "mo", + "ms", + "mt", + "nc", + "nd", + "ne", + "nh", + "nj", + "nm", + "noip", + "nsn", + "nv", + "ny", + "oh", + "ok", + "or", + "pa", + "pointto", + "pr", + "ri", + "sc", + "sd", + "stuff-4-sale", + "tn", + "tx", + "ut", + "va", + "vi", + "vt", + "wa", + "wi", + "wv", + "wy", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "chtr", + "paroch", + "pvt", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "cc", + "k12", + "lib", + "com", + "edu", + "gub", + "mil", + "net", + "org", + "blogspot", + "co", + "com", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "arts", + "co", + "com", + "e12", + "edu", + "firm", + "gob", + "gov", + "info", + "int", + "mil", + "net", + "org", + "rec", + "store", + "tec", + "web", + "co", + "com", + "k12", + "net", + "org", + "ac", + "biz", + "blogspot", + "com", + "edu", + "gov", + "health", + "info", + "int", + "name", + "net", + "org", + "pro", + "com", + "edu", + "net", + "org", + "com", + "dyndns", + "edu", + "gov", + "mypets", + "net", + "org", + "xn--80au", + "xn--90azh", + "xn--c1avg", + "xn--d1at", + "xn--o1ac", + "xn--o1ach", + "fhapp", + "ac", + "agric", + "alt", + "co", + "edu", + "gov", + "grondar", + "law", + "mil", + "net", + "ngo", + "nis", + "nom", + "org", + "school", + "tm", + "web", + "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "mil", + "net", + "org", + "sch", +} diff --git a/vendor/golang.org/x/net/route/address.go b/vendor/golang.org/x/net/route/address.go new file mode 100644 index 00000000000..a56909c105b --- /dev/null +++ b/vendor/golang.org/x/net/route/address.go @@ -0,0 +1,281 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "runtime" + +// An Addr represents an address associated with packet routing. +type Addr interface { + // Family returns an address family. + Family() int +} + +// A LinkAddr represents a link-layer address. 
+type LinkAddr struct { + Index int // interface index when attached + Name string // interface name when attached + Addr []byte // link-layer address when attached +} + +// Family implements the Family method of Addr interface. +func (a *LinkAddr) Family() int { return sysAF_LINK } + +func parseLinkAddr(b []byte) (Addr, error) { + if len(b) < 8 { + return nil, errInvalidAddr + } + _, a, err := parseKernelLinkAddr(sysAF_LINK, b[4:]) + if err != nil { + return nil, err + } + a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4])) + return a, nil +} + +// parseKernelLinkAddr parses b as a link-layer address in +// conventional BSD kernel form. +func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) { + // The encoding looks like the following: + // +----------------------------+ + // | Type (1 octet) | + // +----------------------------+ + // | Name length (1 octet) | + // +----------------------------+ + // | Address length (1 octet) | + // +----------------------------+ + // | Selector length (1 octet) | + // +----------------------------+ + // | Data (variable) | + // +----------------------------+ + // + // On some platforms, all-bit-one of length field means "don't + // care". + nlen, alen, slen := int(b[1]), int(b[2]), int(b[3]) + if nlen == 0xff { + nlen = 0 + } + if alen == 0xff { + alen = 0 + } + if slen == 0xff { + slen = 0 + } + l := 4 + nlen + alen + slen + if len(b) < l { + return 0, nil, errInvalidAddr + } + data := b[4:] + var name string + var addr []byte + if nlen > 0 { + name = string(data[:nlen]) + data = data[nlen:] + } + if alen > 0 { + addr = data[:alen] + data = data[alen:] + } + return l, &LinkAddr{Name: name, Addr: addr}, nil +} + +// An Inet4Addr represents an internet address for IPv4. +type Inet4Addr struct { + IP [4]byte // IP address +} + +// Family implements the Family method of Addr interface. +func (a *Inet4Addr) Family() int { return sysAF_INET } + +// An Inet6Addr represents an internet address for IPv6. +type Inet6Addr struct { + IP [16]byte // IP address + ZoneID int // zone identifier +} + +// Family implements the Family method of Addr interface. +func (a *Inet6Addr) Family() int { return sysAF_INET6 } + +// parseInetAddr parses b as an internet address for IPv4 or IPv6. +func parseInetAddr(af int, b []byte) (Addr, error) { + switch af { + case sysAF_INET: + if len(b) < 16 { + return nil, errInvalidAddr + } + a := &Inet4Addr{} + copy(a.IP[:], b[4:8]) + return a, nil + case sysAF_INET6: + if len(b) < 28 { + return nil, errInvalidAddr + } + a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))} + copy(a.IP[:], b[8:24]) + if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) { + // KAME based IPv6 protocol stack usually + // embeds the interface index in the + // interface-local or link-local address as + // the kernel-internal form. + id := int(bigEndian.Uint16(a.IP[2:4])) + if id != 0 { + a.ZoneID = id + a.IP[2], a.IP[3] = 0, 0 + } + } + return a, nil + default: + return nil, errInvalidAddr + } +} + +// parseKernelInetAddr parses b as an internet address in conventional +// BSD kernel form. +func parseKernelInetAddr(af int, b []byte) (int, Addr, error) { + // The encoding looks similar to the NLRI encoding. 
+ // +----------------------------+ + // | Length (1 octet) | + // +----------------------------+ + // | Address prefix (variable) | + // +----------------------------+ + // + // The differences between the kernel form and the NLRI + // encoding are: + // + // - The length field of the kernel form indicates the prefix + // length in bytes, not in bits + // + // - In the kernel form, zero value of the length field + // doesn't mean 0.0.0.0/0 or ::/0 + // + // - The kernel form appends leading bytes to the prefix field + // to make the <length, prefix> tuple to be conformed with + // the routing message boundary + l := int(b[0]) + if runtime.GOOS == "darwin" { + // On Darwin, an address in the kernel form is also + // used as a message filler. + if l == 0 || len(b) > roundup(l) { + l = roundup(l) + } + } else { + l = roundup(l) + } + if len(b) < l { + return 0, nil, errInvalidAddr + } + // Don't reorder case expressions. + // The case expressions for IPv6 must come first. + const ( + off4 = 4 // offset of in_addr + off6 = 8 // offset of in6_addr + ) + switch { + case b[0] == 28: // size of sockaddr_in6 + a := &Inet6Addr{} + copy(a.IP[:], b[off6:off6+16]) + return int(b[0]), a, nil + case af == sysAF_INET6: + a := &Inet6Addr{} + if l-1 < off6 { + copy(a.IP[:], b[1:l]) + } else { + copy(a.IP[:], b[l-off6:l]) + } + return int(b[0]), a, nil + case b[0] == 16: // size of sockaddr_in + a := &Inet4Addr{} + copy(a.IP[:], b[off4:off4+4]) + return int(b[0]), a, nil + default: // an old fashion, AF_UNSPEC or unknown means AF_INET + a := &Inet4Addr{} + if l-1 < off4 { + copy(a.IP[:], b[1:l]) + } else { + copy(a.IP[:], b[l-off4:l]) + } + return int(b[0]), a, nil + } +} + +// A DefaultAddr represents an address of various operating +// system-specific features. +type DefaultAddr struct { + af int + Raw []byte // raw format of address +} + +// Family implements the Family method of Addr interface. 
+func (a *DefaultAddr) Family() int { return a.af } + +func parseDefaultAddr(b []byte) (Addr, error) { + if len(b) < 2 || len(b) < int(b[0]) { + return nil, errInvalidAddr + } + a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]} + return a, nil +} + +func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) { + var as [sysRTAX_MAX]Addr + af := int(sysAF_UNSPEC) + for i := uint(0); i < sysRTAX_MAX && len(b) >= roundup(0); i++ { + if attrs&(1<> 8) +} + +func (binaryLittleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (binaryLittleEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (binaryLittleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +type binaryBigEndian struct{} + +func (binaryBigEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[1]) | uint16(b[0])<<8 +} + +func (binaryBigEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (binaryBigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (binaryBigEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (binaryBigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} diff --git a/vendor/golang.org/x/net/route/defs_darwin.go b/vendor/golang.org/x/net/route/defs_darwin.go new file mode 100644 index 00000000000..f452ad14ce6 --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_darwin.go @@ -0,0 +1,106 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_STAT = C.NET_RT_STAT + sysNET_RT_TRASH = C.NET_RT_TRASH + sysNET_RT_IFLIST2 = C.NET_RT_IFLIST2 + sysNET_RT_DUMP2 = C.NET_RT_DUMP2 + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFINFO2 = C.RTM_IFINFO2 + sysRTM_NEWMADDR2 = C.RTM_NEWMADDR2 + sysRTM_GET2 = C.RTM_GET2 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrDarwin15 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrDarwin15 = C.sizeof_struct_ifa_msghdr + sizeofIfmaMsghdrDarwin15 = C.sizeof_struct_ifma_msghdr + sizeofIfMsghdr2Darwin15 = C.sizeof_struct_if_msghdr2 + sizeofIfmaMsghdr2Darwin15 = C.sizeof_struct_ifma_msghdr2 + sizeofIfDataDarwin15 = C.sizeof_struct_if_data + sizeofIfData64Darwin15 = C.sizeof_struct_if_data64 + + sizeofRtMsghdrDarwin15 = C.sizeof_struct_rt_msghdr + sizeofRtMsghdr2Darwin15 = C.sizeof_struct_rt_msghdr2 + sizeofRtMetricsDarwin15 = C.sizeof_struct_rt_metrics +) diff --git a/vendor/golang.org/x/net/route/defs_dragonfly.go b/vendor/golang.org/x/net/route/defs_dragonfly.go new file mode 100644 index 00000000000..c737751d76d --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_dragonfly.go @@ -0,0 +1,105 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_P1003_1B = C.CTL_P1003_1B + sysCTL_LWKT = C.CTL_LWKT + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_MPLS1 = C.RTA_MPLS1 + sysRTA_MPLS2 = C.RTA_MPLS2 + sysRTA_MPLS3 = C.RTA_MPLS3 + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MPLS1 = C.RTAX_MPLS1 + sysRTAX_MPLS2 = C.RTAX_MPLS2 + sysRTAX_MPLS3 = C.RTAX_MPLS3 + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrDragonFlyBSD4 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifa_msghdr + sizeofIfmaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrDragonFlyBSD4 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrDragonFlyBSD4 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsDragonFlyBSD4 = C.sizeof_struct_rt_metrics +) diff --git a/vendor/golang.org/x/net/route/defs_freebsd.go b/vendor/golang.org/x/net/route/defs_freebsd.go new file mode 100644 index 00000000000..8f834e81db5 --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_freebsd.go @@ -0,0 +1,329 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +struct if_data_freebsd7 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd8 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd9 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd10 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_vhid; + u_char ifi_baudrate_pf; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + uint64_t ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd11 { + uint8_t ifi_type; + uint8_t ifi_physical; + uint8_t ifi_addrlen; + uint8_t ifi_hdrlen; + uint8_t ifi_link_state; + uint8_t ifi_vhid; + uint16_t ifi_datalen; + uint32_t ifi_mtu; + uint32_t ifi_metric; + uint64_t ifi_baudrate; + uint64_t ifi_ipackets; + uint64_t ifi_ierrors; + uint64_t ifi_opackets; + uint64_t ifi_oerrors; + uint64_t ifi_collisions; + uint64_t ifi_ibytes; + uint64_t ifi_obytes; + uint64_t ifi_imcasts; + uint64_t ifi_omcasts; + uint64_t ifi_iqdrops; + uint64_t ifi_oqdrops; + uint64_t ifi_noproto; + uint64_t ifi_hwassist; + union { + time_t tt; + uint64_t ph; + } __ifi_epoch; + union { + struct timeval tv; + struct { + uint64_t ph1; + uint64_t ph2; + } ph; + } __ifi_lastchange; +}; + +struct if_msghdr_freebsd7 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd7 ifm_data; +}; + +struct if_msghdr_freebsd8 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int 
ifm_flags; + u_short ifm_index; + struct if_data_freebsd8 ifm_data; +}; + +struct if_msghdr_freebsd9 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd9 ifm_data; +}; + +struct if_msghdr_freebsd10 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd10 ifm_data; +}; + +struct if_msghdr_freebsd11 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd11 ifm_data; +}; +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_IFMALIST = C.NET_RT_IFMALIST + sysNET_RT_IFLISTL = C.NET_RT_IFLISTL +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_P1003_1B = C.CTL_P1003_1B +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrlFreeBSD10 = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10 = C.sizeof_struct_ifa_msghdr + sizeofIfaMsghdrlFreeBSD10 = C.sizeof_struct_ifa_msghdrl + sizeofIfmaMsghdrFreeBSD10 = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrFreeBSD10 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrFreeBSD10 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsFreeBSD10 = C.sizeof_struct_rt_metrics + + sizeofIfMsghdrFreeBSD7 = C.sizeof_struct_if_msghdr_freebsd7 + sizeofIfMsghdrFreeBSD8 = C.sizeof_struct_if_msghdr_freebsd8 + sizeofIfMsghdrFreeBSD9 = C.sizeof_struct_if_msghdr_freebsd9 + sizeofIfMsghdrFreeBSD10 = C.sizeof_struct_if_msghdr_freebsd10 + sizeofIfMsghdrFreeBSD11 = C.sizeof_struct_if_msghdr_freebsd11 + + sizeofIfDataFreeBSD7 = C.sizeof_struct_if_data_freebsd7 + sizeofIfDataFreeBSD8 = C.sizeof_struct_if_data_freebsd8 + sizeofIfDataFreeBSD9 = C.sizeof_struct_if_data_freebsd9 + sizeofIfDataFreeBSD10 = C.sizeof_struct_if_data_freebsd10 + sizeofIfDataFreeBSD11 = C.sizeof_struct_if_data_freebsd11 + + sizeofIfMsghdrlFreeBSD10Emu = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10Emu = C.sizeof_struct_ifa_msghdr + 
sizeofIfaMsghdrlFreeBSD10Emu = C.sizeof_struct_ifa_msghdrl + sizeofIfmaMsghdrFreeBSD10Emu = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrFreeBSD10Emu = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrFreeBSD10Emu = C.sizeof_struct_rt_msghdr + sizeofRtMetricsFreeBSD10Emu = C.sizeof_struct_rt_metrics + + sizeofIfMsghdrFreeBSD7Emu = C.sizeof_struct_if_msghdr_freebsd7 + sizeofIfMsghdrFreeBSD8Emu = C.sizeof_struct_if_msghdr_freebsd8 + sizeofIfMsghdrFreeBSD9Emu = C.sizeof_struct_if_msghdr_freebsd9 + sizeofIfMsghdrFreeBSD10Emu = C.sizeof_struct_if_msghdr_freebsd10 + sizeofIfMsghdrFreeBSD11Emu = C.sizeof_struct_if_msghdr_freebsd11 + + sizeofIfDataFreeBSD7Emu = C.sizeof_struct_if_data_freebsd7 + sizeofIfDataFreeBSD8Emu = C.sizeof_struct_if_data_freebsd8 + sizeofIfDataFreeBSD9Emu = C.sizeof_struct_if_data_freebsd9 + sizeofIfDataFreeBSD10Emu = C.sizeof_struct_if_data_freebsd10 + sizeofIfDataFreeBSD11Emu = C.sizeof_struct_if_data_freebsd11 +) diff --git a/vendor/golang.org/x/net/route/defs_netbsd.go b/vendor/golang.org/x/net/route/defs_netbsd.go new file mode 100644 index 00000000000..b18d85e0165 --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_netbsd.go @@ -0,0 +1,104 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_DDB = C.CTL_DDB + sysCTL_PROC = C.CTL_PROC + sysCTL_VENDOR = C.CTL_VENDOR + sysCTL_EMUL = C.CTL_EMUL + sysCTL_SECURITY = C.CTL_SECURITY + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + sysRTM_SETGATE = C.RTM_SETGATE + sysRTM_LLINFO_UPD = C.RTM_LLINFO_UPD + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_CHGADDR = C.RTM_CHGADDR + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_TAG = C.RTA_TAG + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_TAG = C.RTAX_TAG + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrNetBSD7 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrNetBSD7 = C.sizeof_struct_ifa_msghdr 
+ sizeofIfAnnouncemsghdrNetBSD7 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrNetBSD7 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsNetBSD7 = C.sizeof_struct_rt_metrics +) diff --git a/vendor/golang.org/x/net/route/defs_openbsd.go b/vendor/golang.org/x/net/route/defs_openbsd.go new file mode 100644 index 00000000000..5df7a43bc3e --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_openbsd.go @@ -0,0 +1,93 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_STATS = C.NET_RT_STATS + sysNET_RT_TABLE = C.NET_RT_TABLE + sysNET_RT_IFNAMES = C.NET_RT_IFNAMES + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_FS = C.CTL_FS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_DDB = C.CTL_DDB + sysCTL_VFS = C.CTL_VFS + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_DESYNC = C.RTM_DESYNC + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_SRC = C.RTA_SRC + sysRTA_SRCMASK = C.RTA_SRCMASK + sysRTA_LABEL = C.RTA_LABEL + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_SRC = C.RTAX_SRC + sysRTAX_SRCMASK = C.RTAX_SRCMASK + sysRTAX_LABEL = C.RTAX_LABEL + sysRTAX_MAX = C.RTAX_MAX +) diff --git a/vendor/golang.org/x/net/route/interface.go b/vendor/golang.org/x/net/route/interface.go new file mode 100644 index 00000000000..854906d9c42 --- /dev/null +++ b/vendor/golang.org/x/net/route/interface.go @@ -0,0 +1,64 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +// An InterfaceMessage represents an interface message. +type InterfaceMessage struct { + Version int // message version + Type int // message type + Flags int // interface flags + Index int // interface index + Name string // interface name + Addrs []Addr // addresses + + extOff int // offset of header extension + raw []byte // raw message +} + +// An InterfaceAddrMessage represents an interface address message. 
+type InterfaceAddrMessage struct { + Version int // message version + Type int // message type + Flags int // interface flags + Index int // interface index + Addrs []Addr // addresses + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. +func (m *InterfaceAddrMessage) Sys() []Sys { return nil } + +// An InterfaceMulticastAddrMessage represents an interface multicast +// address message. +type InterfaceMulticastAddrMessage struct { + Version int // message version + Type int // messsage type + Flags int // interface flags + Index int // interface index + Addrs []Addr // addresses + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMulticastAddrMessage) Sys() []Sys { return nil } + +// An InterfaceAnnounceMessage represents an interface announcement +// message. +type InterfaceAnnounceMessage struct { + Version int // message version + Type int // message type + Index int // interface index + Name string // interface name + What int // what type of announcement + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. +func (m *InterfaceAnnounceMessage) Sys() []Sys { return nil } diff --git a/vendor/golang.org/x/net/route/interface_announce.go b/vendor/golang.org/x/net/route/interface_announce.go new file mode 100644 index 00000000000..520d657b578 --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_announce.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd netbsd + +package route + +func (w *wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAnnounceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Index: int(nativeEndian.Uint16(b[4:6])), + What: int(nativeEndian.Uint16(b[22:24])), + raw: b[:l], + } + for i := 0; i < 16; i++ { + if b[6+i] != 0 { + continue + } + m.Name = string(b[6 : 6+i]) + break + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_classic.go b/vendor/golang.org/x/net/route/interface_classic.go new file mode 100644 index 00000000000..ac4e7a6805a --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_classic.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly netbsd + +package route + +import "runtime" + +func (w *wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Addrs: make([]Addr, sysRTAX_MAX), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + extOff: w.extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[w.bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + raw: b[:l], + } + if runtime.GOOS == "netbsd" { + m.Index = int(nativeEndian.Uint16(b[16:18])) + } else { + m.Index = int(nativeEndian.Uint16(b[12:14])) + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_freebsd.go b/vendor/golang.org/x/net/route/interface_freebsd.go new file mode 100644 index 00000000000..9f6f50c00fe --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_freebsd.go @@ -0,0 +1,78 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package route + +func (w *wireFormat) parseInterfaceMessage(typ RIBType, b []byte) (Message, error) { + var extOff, bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 20 { + return nil, errMessageTooShort + } + extOff = int(nativeEndian.Uint16(b[18:20])) + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + extOff = w.extOff + bodyOff = w.bodyOff + } + if len(b) < extOff || len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + Addrs: make([]Addr, sysRTAX_MAX), + extOff: extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(typ RIBType, b []byte) (Message, error) { + var bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 24 { + return nil, errMessageTooShort + } + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + bodyOff = w.bodyOff + } + if len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_multicast.go b/vendor/golang.org/x/net/route/interface_multicast.go new file mode 100644 index 00000000000..1e99a9cc64b --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_multicast.go @@ -0,0 +1,30 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd + +package route + +func (w *wireFormat) parseInterfaceMulticastAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceMulticastAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_openbsd.go b/vendor/golang.org/x/net/route/interface_openbsd.go new file mode 100644 index 00000000000..e4a143c1c73 --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_openbsd.go @@ -0,0 +1,90 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package route + +func (*wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 32 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[12:16])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[16:20])), + Index: int(nativeEndian.Uint16(b[6:8])), + Addrs: make([]Addr, sysRTAX_MAX), + raw: b[:l], + } + ll := int(nativeEndian.Uint16(b[4:6])) + if len(b) < ll { + return nil, errInvalidMessage + } + a, err := parseLinkAddr(b[ll:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (*wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 24 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + bodyOff := int(nativeEndian.Uint16(b[4:6])) + if len(b) < bodyOff { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[12:16])), + Index: int(nativeEndian.Uint16(b[6:8])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} + +func (*wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 26 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAnnounceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Index: int(nativeEndian.Uint16(b[6:8])), + What: int(nativeEndian.Uint16(b[8:10])), + raw: b[:l], + } + for i := 0; i < 16; i++ { + if b[10+i] != 0 { + continue + } + m.Name = string(b[10 : 10+i]) + break + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/message.go b/vendor/golang.org/x/net/route/message.go new file mode 100644 index 00000000000..d7ae0eb50fc --- /dev/null +++ b/vendor/golang.org/x/net/route/message.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +// A Message represents a routing message. +// +// Note: This interface will be changed to support Marshal method in +// future version. +type Message interface { + // Sys returns operating system-specific information. + Sys() []Sys +} + +// A Sys reprensents operating system-specific information. +type Sys interface { + // SysType returns a type of operating system-specific + // information. + SysType() SysType +} + +// A SysType represents a type of operating system-specific +// information. +type SysType int + +const ( + SysMetrics SysType = iota + SysStats +) + +// ParseRIB parses b as a routing information base and returns a list +// of routing messages. 
+func ParseRIB(typ RIBType, b []byte) ([]Message, error) { + if !typ.parseable() { + return nil, errUnsupportedMessage + } + var msgs []Message + nmsgs, nskips := 0, 0 + for len(b) > 4 { + nmsgs++ + l := int(nativeEndian.Uint16(b[:2])) + if l == 0 { + return nil, errInvalidMessage + } + if len(b) < l { + return nil, errMessageTooShort + } + if b[2] != sysRTM_VERSION { + b = b[l:] + continue + } + mtyp := int(b[3]) + if fn, ok := parseFns[mtyp]; !ok { + nskips++ + } else { + m, err := fn(typ, b) + if err != nil { + return nil, err + } + if m == nil { + nskips++ + } else { + msgs = append(msgs, m) + } + } + b = b[l:] + } + // We failed to parse any of the messages - version mismatch? + if nmsgs != len(msgs)+nskips { + return nil, errMessageMismatch + } + return msgs, nil +} diff --git a/vendor/golang.org/x/net/route/message_darwin_test.go b/vendor/golang.org/x/net/route/message_darwin_test.go new file mode 100644 index 00000000000..3fdd12df553 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_darwin_test.go @@ -0,0 +1,27 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "testing" + +func TestFetchAndParseRIBOnDarwin(t *testing.T) { + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + for _, typ := range []RIBType{sysNET_RT_FLAGS, sysNET_RT_DUMP2, sysNET_RT_IFLIST2} { + ms, err := fetchAndParseRIB(af, typ) + if err != nil { + t.Error(err) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Errorf("%v %d %v", addrFamily(af), typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } + } +} diff --git a/vendor/golang.org/x/net/route/message_freebsd_test.go b/vendor/golang.org/x/net/route/message_freebsd_test.go new file mode 100644 index 00000000000..785c273f656 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_freebsd_test.go @@ -0,0 +1,106 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import ( + "testing" + "time" + "unsafe" +) + +func TestFetchAndParseRIBOnFreeBSD(t *testing.T) { + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + for _, typ := range []RIBType{sysNET_RT_IFMALIST} { + ms, err := fetchAndParseRIB(af, typ) + if err != nil { + t.Error(err) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Errorf("%v %d %v", addrFamily(af), typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } + } +} + +func TestFetchAndParseRIBOnFreeBSD10AndAbove(t *testing.T) { + if _, err := FetchRIB(sysAF_UNSPEC, sysNET_RT_IFLISTL, 0); err != nil { + t.Skip("NET_RT_IFLISTL not supported") + } + var p uintptr + if kernelAlign != int(unsafe.Sizeof(p)) { + t.Skip("NET_RT_IFLIST vs. 
NET_RT_IFLISTL doesn't work for 386 emulation on amd64") + } + + var tests = [2]struct { + typ RIBType + b []byte + msgs []Message + ss []string + }{ + {typ: sysNET_RT_IFLIST}, + {typ: sysNET_RT_IFLISTL}, + } + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + var lastErr error + for i := 0; i < 3; i++ { + for j := range tests { + var err error + if tests[j].b, err = FetchRIB(af, tests[j].typ, 0); err != nil { + lastErr = err + time.Sleep(10 * time.Millisecond) + } + } + if lastErr == nil { + break + } + } + if lastErr != nil { + t.Error(af, lastErr) + continue + } + for i := range tests { + var err error + if tests[i].msgs, err = ParseRIB(tests[i].typ, tests[i].b); err != nil { + lastErr = err + t.Error(af, err) + } + } + if lastErr != nil { + continue + } + for i := range tests { + var err error + tests[i].ss, err = msgs(tests[i].msgs).validate() + if err != nil { + lastErr = err + t.Error(af, err) + } + for _, s := range tests[i].ss { + t.Log(s) + } + } + if lastErr != nil { + continue + } + for i := len(tests) - 1; i > 0; i-- { + if len(tests[i].ss) != len(tests[i-1].ss) { + t.Errorf("got %v; want %v", tests[i].ss, tests[i-1].ss) + continue + } + for j, s1 := range tests[i].ss { + s0 := tests[i-1].ss[j] + if s1 != s0 { + t.Errorf("got %s; want %s", s1, s0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/route/message_test.go b/vendor/golang.org/x/net/route/message_test.go new file mode 100644 index 00000000000..c0c7c57a9a5 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_test.go @@ -0,0 +1,118 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "os" + "syscall" + "testing" + "time" +) + +func TestFetchAndParseRIB(t *testing.T) { + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + for _, typ := range []RIBType{sysNET_RT_DUMP, sysNET_RT_IFLIST} { + ms, err := fetchAndParseRIB(af, typ) + if err != nil { + t.Error(err) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Errorf("%v %d %v", addrFamily(af), typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } + } +} + +func TestMonitorAndParseRIB(t *testing.T) { + if testing.Short() || os.Getuid() != 0 { + t.Skip("must be root") + } + + // We suppose that using an IPv4 link-local address and the + // dot1Q ID for Token Ring and FDDI doesn't harm anyone. 
+ pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(1002); err != nil { + t.Skip(err) + } + if err := pv.setup(); err != nil { + t.Skip(err) + } + pv.teardown() + + s, err := syscall.Socket(syscall.AF_ROUTE, syscall.SOCK_RAW, syscall.AF_UNSPEC) + if err != nil { + t.Fatal(err) + } + defer syscall.Close(s) + + go func() { + b := make([]byte, os.Getpagesize()) + for { + n, err := syscall.Read(s, b) + if err != nil { + return + } + ms, err := ParseRIB(0, b[:n]) + if err != nil { + t.Error(err) + return + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(err) + return + } + for _, s := range ss { + t.Log(s) + } + } + }() + + for _, vid := range []int{1002, 1003, 1004, 1005} { + pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(vid); err != nil { + t.Fatal(err) + } + if err := pv.setup(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + if err := pv.teardown(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + } +} + +func TestParseRIBWithFuzz(t *testing.T) { + for _, fuzz := range []string{ + "0\x00\x05\x050000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "0000000000000\x02000000" + + "00000000", + "\x02\x00\x05\f0000000000000000" + + "0\x0200000000000000", + "\x02\x00\x05\x100000000000000\x1200" + + "0\x00\xff\x00", + "\x02\x00\x05\f0000000000000000" + + "0\x12000\x00\x02\x0000", + "\x00\x00\x00\x01\x00", + "00000", + } { + for typ := RIBType(0); typ < 256; typ++ { + ParseRIB(typ, []byte(fuzz)) + } + } +} diff --git a/vendor/golang.org/x/net/route/route.go b/vendor/golang.org/x/net/route/route.go new file mode 100644 index 00000000000..c986e29ebc9 --- /dev/null +++ b/vendor/golang.org/x/net/route/route.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +// Package route provides basic functions for the manipulation of +// packet routing facilities on BSD variants. +// +// The package supports any version of Darwin, any version of +// DragonFly BSD, FreeBSD 7 through 11, NetBSD 6 and above, and +// OpenBSD 5.6 and above. +package route + +import ( + "errors" + "os" + "syscall" +) + +var ( + errUnsupportedMessage = errors.New("unsupported message") + errMessageMismatch = errors.New("message mismatch") + errMessageTooShort = errors.New("message too short") + errInvalidMessage = errors.New("invalid message") + errInvalidAddr = errors.New("invalid address") +) + +// A RouteMessage represents a message conveying an address prefix, a +// nexthop address and an output interface. +type RouteMessage struct { + Version int // message version + Type int // message type + Flags int // route flags + Index int // interface index when atatched + Addrs []Addr // addresses + + extOff int // offset of header extension + raw []byte // raw message +} + +// A RIBType reprensents a type of routing information base. +type RIBType int + +const ( + RIBTypeRoute RIBType = syscall.NET_RT_DUMP + RIBTypeInterface RIBType = syscall.NET_RT_IFLIST +) + +// FetchRIB fetches a routing information base from the operating +// system. +// +// The provided af must be an address family. +// +// The provided arg must be a RIBType-specific argument. +// When RIBType is related to routes, arg might be a set of route +// flags. 
When RIBType is related to network interfaces, arg might be +// an interface index or a set of interface flags. In most cases, zero +// means a wildcard. +func FetchRIB(af int, typ RIBType, arg int) ([]byte, error) { + mib := [6]int32{sysCTL_NET, sysAF_ROUTE, 0, int32(af), int32(typ), int32(arg)} + n := uintptr(0) + if err := sysctl(mib[:], nil, &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + if n == 0 { + return nil, nil + } + b := make([]byte, n) + if err := sysctl(mib[:], &b[0], &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + return b[:n], nil +} diff --git a/vendor/golang.org/x/net/route/route_classic.go b/vendor/golang.org/x/net/route/route_classic.go new file mode 100644 index 00000000000..d333c6aa526 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_classic.go @@ -0,0 +1,31 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd + +package route + +func (w *wireFormat) parseRouteMessage(typ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[4:6])), + extOff: w.extOff, + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/route_openbsd.go b/vendor/golang.org/x/net/route/route_openbsd.go new file mode 100644 index 00000000000..76eae40d806 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_openbsd.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 40 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[16:20])), + Index: int(nativeEndian.Uint16(b[6:8])), + raw: b[:l], + } + ll := int(nativeEndian.Uint16(b[4:6])) + if len(b) < ll { + return nil, errInvalidMessage + } + as, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[ll:]) + if err != nil { + return nil, err + } + m.Addrs = as + return m, nil +} diff --git a/vendor/golang.org/x/net/route/route_test.go b/vendor/golang.org/x/net/route/route_test.go new file mode 100644 index 00000000000..99f57b712d9 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_test.go @@ -0,0 +1,385 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+import (
+	"fmt"
+	"os/exec"
+	"runtime"
+	"time"
+)
+
+func (m *RouteMessage) String() string {
+	return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[12:16])))
+}
+
+func (m *InterfaceMessage) String() string {
+	var attrs addrAttrs
+	if runtime.GOOS == "openbsd" {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+	} else {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+	}
+	return fmt.Sprintf("%s", attrs)
+}
+
+func (m *InterfaceAddrMessage) String() string {
+	var attrs addrAttrs
+	if runtime.GOOS == "openbsd" {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+	} else {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+	}
+	return fmt.Sprintf("%s", attrs)
+}
+
+func (m *InterfaceMulticastAddrMessage) String() string {
+	return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[4:8])))
+}
+
+func (m *InterfaceAnnounceMessage) String() string {
+	what := ""
+	switch m.What {
+	case 0:
+		what = "arrival"
+	case 1:
+		what = "departure"
+	}
+	return fmt.Sprintf("(%d %s %s)", m.Index, m.Name, what)
+}
+
+func (m *InterfaceMetrics) String() string {
+	return fmt.Sprintf("(type=%d mtu=%d)", m.Type, m.MTU)
+}
+
+func (m *RouteMetrics) String() string {
+	return fmt.Sprintf("(pmtu=%d)", m.PathMTU)
+}
+
+type addrAttrs uint
+
+var addrAttrNames = [...]string{
+	"dst",
+	"gateway",
+	"netmask",
+	"genmask",
+	"ifp",
+	"ifa",
+	"author",
+	"brd",
+	"df:mpls1-n:tag-o:src", // mpls1 for dragonfly, tag for netbsd, src for openbsd
+	"df:mpls2-o:srcmask", // mpls2 for dragonfly, srcmask for openbsd
+	"df:mpls3-o:label", // mpls3 for dragonfly, label for openbsd
+}
+
+func (attrs addrAttrs) String() string {
+	var s string
+	for i, name := range addrAttrNames {
+		if attrs&(1<<uint(i)) != 0 {
+			if s != "" {
+				s += "|"
+			}
+			s += name
+		}
+	}
+	if s == "" {
+		return "<nil>"
+	}
+	return s
+}
+
+type msgs []Message
+
+func (ms msgs) validate() ([]string, error) {
+	var ss []string
+	for _, m := range ms {
+		switch m := m.(type) {
+		case *RouteMessage:
+			if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[12:16]))); err != nil {
+				return nil, err
+			}
+			sys := m.Sys()
+			if sys == nil {
+				return nil, fmt.Errorf("no sys for %s", m.String())
+			}
+			ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String())
+		case *InterfaceMessage:
+			var attrs addrAttrs
+			if runtime.GOOS == "openbsd" {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+			} else {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+			}
+			if err := addrs(m.Addrs).match(attrs); err != nil {
+				return nil, err
+			}
+			sys := m.Sys()
+			if sys == nil {
+				return nil, fmt.Errorf("no sys for %s", m.String())
+			}
+			ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String())
+		case *InterfaceAddrMessage:
+			var attrs addrAttrs
+			if runtime.GOOS == "openbsd" {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+			} else {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+			}
+			if err := addrs(m.Addrs).match(attrs); err != nil {
+				return nil, err
+			}
+			ss = append(ss, m.String()+" "+addrs(m.Addrs).String())
+		case *InterfaceMulticastAddrMessage:
+			if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[4:8]))); err != nil {
+				return nil, err
+			}
+			ss = append(ss, m.String()+" "+addrs(m.Addrs).String())
+		case *InterfaceAnnounceMessage:
+			ss = append(ss, m.String())
+		default:
+			ss = append(ss, fmt.Sprintf("%+v", m))
+		}
+	}
+	return ss, nil
+}
+
+type syss []Sys
+
+func (sys syss) String() string {
+	var s string
+	for _, sy := range sys {
+		switch sy := sy.(type) {
+		case *InterfaceMetrics:
+			if len(s) > 0 {
+				s += " "
+			}
+			s += sy.String()
+		case *RouteMetrics:
+			if len(s) > 0 {
+				s += " "
+			}
+			s += sy.String()
+		}
+	}
+	return s
+}
+
+type addrFamily int
+
+func (af addrFamily) String() string {
+	switch af {
+	case sysAF_UNSPEC:
+		return "unspec"
+	case sysAF_LINK:
+		return "link"
+	case sysAF_INET:
+		return "inet4"
+	case sysAF_INET6:
+		return "inet6"
+	default:
+		return fmt.Sprintf("%d", af)
+	}
+}
+
+const hexDigit = "0123456789abcdef"
+
+type llAddr []byte
+
+func (a llAddr) String() string {
+	if len(a) == 0 {
+		return ""
+	}
+	buf := make([]byte, 0, len(a)*3-1)
+	for i, b := range a {
+		if i > 0 {
+			buf = append(buf, ':')
+		}
+		buf = append(buf, hexDigit[b>>4])
+		buf = append(buf, hexDigit[b&0xF])
+	}
+	return string(buf)
+}
+
+type ipAddr []byte
+
+func (a ipAddr) String() string {
+	if len(a) == 0 {
+		return ""
+	}
+	if len(a) == 4 {
+		return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3])
+	}
+	if len(a) == 16 {
+		return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15])
+	}
+	s := make([]byte, len(a)*2)
+	for i, tn := range a {
+		s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf]
+	}
+	return string(s)
+}
+
+func (a *LinkAddr) String() string {
+	name := a.Name
+	if name == "" {
+		name = "<nil>"
+	}
+	lla := llAddr(a.Addr).String()
+	if lla == "" {
+		lla = "<nil>"
+	}
+	return fmt.Sprintf("(%v %d %s %s)", addrFamily(a.Family()), a.Index, name, lla)
+}
+
+func (a Inet4Addr) String() string {
+	return fmt.Sprintf("(%v %v)", addrFamily(a.Family()), ipAddr(a.IP[:]))
+}
+
+func (a *Inet6Addr) String() string {
+	return fmt.Sprintf("(%v %v %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.ZoneID)
+}
+
+func (a *DefaultAddr) String() string {
+	return fmt.Sprintf("(%v %s)", addrFamily(a.Family()), ipAddr(a.Raw[2:]).String())
+}
+
+type addrs []Addr
+
+func (as addrs) String() string {
+	var s string
+	for _, a := range as {
+		if a == nil {
+			continue
+		}
+		if len(s) > 0 {
+			s += " "
+		}
+		switch a := a.(type) {
+		case *LinkAddr:
+			s += a.String()
+		case *Inet4Addr:
+			s += a.String()
+		case *Inet6Addr:
+			s += a.String()
+		case *DefaultAddr:
+			s += a.String()
+		}
+	}
+	if s == "" {
+		return "<nil>"
+	}
+	return s
+}
+
+func (as addrs) match(attrs addrAttrs) error {
+	var ts addrAttrs
+	af := sysAF_UNSPEC
+	for i := range as {
+		if as[i] != nil {
+			ts |= 1 << uint(i)
+		}
+		switch as[i].(type) {
+		case *Inet4Addr:
+			if af == sysAF_UNSPEC {
+				af = sysAF_INET
+			}
+			if af != sysAF_INET {
+				return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af))
+			}
+		case *Inet6Addr:
+			if af == sysAF_UNSPEC {
+				af = sysAF_INET6
+			}
+			if af != sysAF_INET6 {
+				return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af))
+			}
+		}
+	}
+	if ts != attrs && ts > attrs {
+		return fmt.Errorf("%v not included in %v", ts, attrs)
+	}
+	return nil
+}
+
+func fetchAndParseRIB(af int, typ RIBType) ([]Message, error) {
+	var err error
+	var b []byte
+	for i := 0; i < 3; i++ {
+		if b, err = FetchRIB(af, typ, 0); err != nil {
+			time.Sleep(10 * time.Millisecond)
+			continue
+		}
+		break
+	}
+	if err != nil {
+		return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err)
+	}
+	ms, err := ParseRIB(typ, b)
+	if err != nil {
+		return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err)
+	}
+	return ms, nil
+}
+
+type propVirtual struct {
+	name string
+	addr, mask string
+	setupCmds []*exec.Cmd
+	teardownCmds []*exec.Cmd
+}
+
+func (ti *propVirtual) setup()
error { + for _, cmd := range ti.setupCmds { + if err := cmd.Run(); err != nil { + ti.teardown() + return err + } + } + return nil +} + +func (ti *propVirtual) teardown() error { + for _, cmd := range ti.teardownCmds { + if err := cmd.Run(); err != nil { + return err + } + } + return nil +} + +func (ti *propVirtual) configure(suffix int) error { + if runtime.GOOS == "openbsd" { + ti.name = fmt.Sprintf("vether%d", suffix) + } else { + ti.name = fmt.Sprintf("vlan%d", suffix) + } + xname, err := exec.LookPath("ifconfig") + if err != nil { + return err + } + ti.setupCmds = append(ti.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", ti.name, "create"}, + }) + if runtime.GOOS == "netbsd" { + // NetBSD requires an underlying dot1Q-capable network + // interface. + ti.setupCmds = append(ti.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", ti.name, "vlan", fmt.Sprintf("%d", suffix&0xfff), "vlanif", "wm0"}, + }) + } + ti.setupCmds = append(ti.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", ti.name, "inet", ti.addr, "netmask", ti.mask}, + }) + ti.teardownCmds = append(ti.teardownCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", ti.name, "destroy"}, + }) + return nil +} diff --git a/vendor/golang.org/x/net/route/sys.go b/vendor/golang.org/x/net/route/sys.go new file mode 100644 index 00000000000..80ca83ae137 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys.go @@ -0,0 +1,40 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "unsafe" + +var ( + nativeEndian binaryByteOrder + kernelAlign int + parseFns map[int]parseFn +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = littleEndian + } else { + nativeEndian = bigEndian + } + kernelAlign, parseFns = probeRoutingStack() +} + +func roundup(l int) int { + if l == 0 { + return kernelAlign + } + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} + +type parseFn func(RIBType, []byte) (Message, error) + +type wireFormat struct { + extOff int // offset of header extension + bodyOff int // offset of message body +} diff --git a/vendor/golang.org/x/net/route/sys_darwin.go b/vendor/golang.org/x/net/route/sys_darwin.go new file mode 100644 index 00000000000..fff3a0fd1db --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_darwin.go @@ -0,0 +1,80 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (typ RIBType) parseable() bool { + switch typ { + case sysNET_RT_STAT, sysNET_RT_TRASH: + return false + default: + return true + } +} + +// A RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), + }, + } +} + +// A InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. 
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]parseFn) { + rtm := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdrDarwin15} + rtm2 := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdr2Darwin15} + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDarwin15} + ifm2 := &wireFormat{extOff: 32, bodyOff: sizeofIfMsghdr2Darwin15} + ifam := &wireFormat{extOff: sizeofIfaMsghdrDarwin15, bodyOff: sizeofIfaMsghdrDarwin15} + ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDarwin15, bodyOff: sizeofIfmaMsghdrDarwin15} + ifmam2 := &wireFormat{extOff: sizeofIfmaMsghdr2Darwin15, bodyOff: sizeofIfmaMsghdr2Darwin15} + // Darwin kernels require 32-bit aligned access to routing facilities. + return 4, map[int]parseFn{ + sysRTM_ADD: rtm.parseRouteMessage, + sysRTM_DELETE: rtm.parseRouteMessage, + sysRTM_CHANGE: rtm.parseRouteMessage, + sysRTM_GET: rtm.parseRouteMessage, + sysRTM_LOSING: rtm.parseRouteMessage, + sysRTM_REDIRECT: rtm.parseRouteMessage, + sysRTM_MISS: rtm.parseRouteMessage, + sysRTM_LOCK: rtm.parseRouteMessage, + sysRTM_RESOLVE: rtm.parseRouteMessage, + sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage, + sysRTM_DELADDR: ifam.parseInterfaceAddrMessage, + sysRTM_IFINFO: ifm.parseInterfaceMessage, + sysRTM_NEWMADDR: ifmam.parseInterfaceMulticastAddrMessage, + sysRTM_DELMADDR: ifmam.parseInterfaceMulticastAddrMessage, + sysRTM_IFINFO2: ifm2.parseInterfaceMessage, + sysRTM_NEWMADDR2: ifmam2.parseInterfaceMulticastAddrMessage, + sysRTM_GET2: rtm2.parseRouteMessage, + } +} diff --git a/vendor/golang.org/x/net/route/sys_dragonfly.go b/vendor/golang.org/x/net/route/sys_dragonfly.go new file mode 100644 index 00000000000..da848b3d076 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_dragonfly.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "unsafe" + +func (typ RIBType) parseable() bool { return true } + +// A RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } +} + +// A InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. 
+func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]parseFn) { + var p uintptr + rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrDragonFlyBSD4} + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDragonFlyBSD4} + ifam := &wireFormat{extOff: sizeofIfaMsghdrDragonFlyBSD4, bodyOff: sizeofIfaMsghdrDragonFlyBSD4} + ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDragonFlyBSD4, bodyOff: sizeofIfmaMsghdrDragonFlyBSD4} + ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrDragonFlyBSD4, bodyOff: sizeofIfAnnouncemsghdrDragonFlyBSD4} + return int(unsafe.Sizeof(p)), map[int]parseFn{ + sysRTM_ADD: rtm.parseRouteMessage, + sysRTM_DELETE: rtm.parseRouteMessage, + sysRTM_CHANGE: rtm.parseRouteMessage, + sysRTM_GET: rtm.parseRouteMessage, + sysRTM_LOSING: rtm.parseRouteMessage, + sysRTM_REDIRECT: rtm.parseRouteMessage, + sysRTM_MISS: rtm.parseRouteMessage, + sysRTM_LOCK: rtm.parseRouteMessage, + sysRTM_RESOLVE: rtm.parseRouteMessage, + sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage, + sysRTM_DELADDR: ifam.parseInterfaceAddrMessage, + sysRTM_IFINFO: ifm.parseInterfaceMessage, + sysRTM_NEWMADDR: ifmam.parseInterfaceMulticastAddrMessage, + sysRTM_DELMADDR: ifmam.parseInterfaceMulticastAddrMessage, + sysRTM_IFANNOUNCE: ifanm.parseInterfaceAnnounceMessage, + } +} diff --git a/vendor/golang.org/x/net/route/sys_freebsd.go b/vendor/golang.org/x/net/route/sys_freebsd.go new file mode 100644 index 00000000000..7b05c1a5a05 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_freebsd.go @@ -0,0 +1,150 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import ( + "syscall" + "unsafe" +) + +func (typ RIBType) parseable() bool { return true } + +// A RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + if kernelAlign == 8 { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } + } + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), + }, + } +} + +// A InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]parseFn) { + var p uintptr + wordSize := int(unsafe.Sizeof(p)) + align := int(unsafe.Sizeof(p)) + // In the case of kern.supported_archs="amd64 i386", we need + // to know the underlying kernel's architecture because the + // alignment for routing facilities are set at the build time + // of the kernel. 
+ conf, _ := syscall.Sysctl("kern.conftxt") + for i, j := 0, 0; j < len(conf); j++ { + if conf[j] != '\n' { + continue + } + s := conf[i:j] + i = j + 1 + if len(s) > len("machine") && s[:len("machine")] == "machine" { + s = s[len("machine"):] + for k := 0; k < len(s); k++ { + if s[k] == ' ' || s[k] == '\t' { + s = s[1:] + } + break + } + if s == "amd64" { + align = 8 + } + break + } + } + var rtm, ifm, ifam, ifmam, ifanm *wireFormat + if align != wordSize { // 386 emulation on amd64 + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10Emu, bodyOff: sizeofIfmaMsghdrFreeBSD10Emu} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10Emu, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10Emu} + } else { + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10 - sizeofRtMetricsFreeBSD10, bodyOff: sizeofRtMsghdrFreeBSD10} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10, bodyOff: sizeofIfaMsghdrFreeBSD10} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10, bodyOff: sizeofIfmaMsghdrFreeBSD10} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10} + } + rel, _ := syscall.SysctlUint32("kern.osreldate") + switch { + case rel < 800000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD7 + } + case 800000 <= rel && rel < 900000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD8 + } + case 900000 <= rel && rel < 1000000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD9 + } + case 1000000 <= rel && rel < 1100000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD10 + } + default: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD11 + } + } + return align, map[int]parseFn{ + sysRTM_ADD: rtm.parseRouteMessage, + sysRTM_DELETE: rtm.parseRouteMessage, + sysRTM_CHANGE: rtm.parseRouteMessage, + sysRTM_GET: rtm.parseRouteMessage, + sysRTM_LOSING: rtm.parseRouteMessage, + sysRTM_REDIRECT: rtm.parseRouteMessage, + sysRTM_MISS: rtm.parseRouteMessage, + sysRTM_LOCK: rtm.parseRouteMessage, + sysRTM_RESOLVE: rtm.parseRouteMessage, + sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage, + sysRTM_DELADDR: ifam.parseInterfaceAddrMessage, + sysRTM_IFINFO: ifm.parseInterfaceMessage, + sysRTM_NEWMADDR: ifmam.parseInterfaceMulticastAddrMessage, + sysRTM_DELMADDR: ifmam.parseInterfaceMulticastAddrMessage, + sysRTM_IFANNOUNCE: ifanm.parseInterfaceAnnounceMessage, + } +} diff --git a/vendor/golang.org/x/net/route/sys_netbsd.go b/vendor/golang.org/x/net/route/sys_netbsd.go new file mode 100644 index 00000000000..4d8076b518d --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_netbsd.go @@ -0,0 +1,67 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
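The FreeBSD probe above decides routing-message alignment by scanning the kern.conftxt sysctl output for its "machine" line, so a 386 kernel reached through amd64 emulation is detected even when the binary itself is 32-bit. A minimal standalone sketch of that scan follows; machineFromConftxt and the sample text are illustrative only, not part of the vendored file.

package main

import (
	"fmt"
	"strings"
)

// machineFromConftxt returns the value of the "machine" line in a
// kern.conftxt-style kernel configuration dump, or "" if absent.
func machineFromConftxt(conf string) string {
	for _, line := range strings.Split(conf, "\n") {
		fields := strings.Fields(line)
		if len(fields) == 2 && fields[0] == "machine" {
			return fields[1]
		}
	}
	return ""
}

func main() {
	sample := "options INET6\nmachine amd64\nident GENERIC\n"
	fmt.Println(machineFromConftxt(sample)) // amd64
}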
+ +package route + +func (typ RIBType) parseable() bool { return true } + +// A RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } +} + +// A InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]parseFn) { + rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrNetBSD7} + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrNetBSD7} + ifam := &wireFormat{extOff: sizeofIfaMsghdrNetBSD7, bodyOff: sizeofIfaMsghdrNetBSD7} + ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrNetBSD7, bodyOff: sizeofIfAnnouncemsghdrNetBSD7} + // NetBSD 6 and above kernels require 64-bit aligned access to + // routing facilities. + return 8, map[int]parseFn{ + sysRTM_ADD: rtm.parseRouteMessage, + sysRTM_DELETE: rtm.parseRouteMessage, + sysRTM_CHANGE: rtm.parseRouteMessage, + sysRTM_GET: rtm.parseRouteMessage, + sysRTM_LOSING: rtm.parseRouteMessage, + sysRTM_REDIRECT: rtm.parseRouteMessage, + sysRTM_MISS: rtm.parseRouteMessage, + sysRTM_LOCK: rtm.parseRouteMessage, + sysRTM_RESOLVE: rtm.parseRouteMessage, + sysRTM_NEWADDR: ifam.parseInterfaceAddrMessage, + sysRTM_DELADDR: ifam.parseInterfaceAddrMessage, + sysRTM_IFANNOUNCE: ifanm.parseInterfaceAnnounceMessage, + sysRTM_IFINFO: ifm.parseInterfaceMessage, + } +} diff --git a/vendor/golang.org/x/net/route/sys_openbsd.go b/vendor/golang.org/x/net/route/sys_openbsd.go new file mode 100644 index 00000000000..26d04386961 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_openbsd.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "unsafe" + +func (typ RIBType) parseable() bool { + switch typ { + case sysNET_RT_STATS, sysNET_RT_TABLE: + return false + default: + return true + } +} + +// A RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[60:64])), + }, + } +} + +// A InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. 
+func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[24]), + MTU: int(nativeEndian.Uint32(m.raw[28:32])), + }, + } +} + +func probeRoutingStack() (int, map[int]parseFn) { + var p uintptr + nooff := &wireFormat{extOff: -1, bodyOff: -1} + return int(unsafe.Sizeof(p)), map[int]parseFn{ + sysRTM_ADD: nooff.parseRouteMessage, + sysRTM_DELETE: nooff.parseRouteMessage, + sysRTM_CHANGE: nooff.parseRouteMessage, + sysRTM_GET: nooff.parseRouteMessage, + sysRTM_LOSING: nooff.parseRouteMessage, + sysRTM_REDIRECT: nooff.parseRouteMessage, + sysRTM_MISS: nooff.parseRouteMessage, + sysRTM_LOCK: nooff.parseRouteMessage, + sysRTM_RESOLVE: nooff.parseRouteMessage, + sysRTM_NEWADDR: nooff.parseInterfaceAddrMessage, + sysRTM_DELADDR: nooff.parseInterfaceAddrMessage, + sysRTM_IFINFO: nooff.parseInterfaceMessage, + sysRTM_IFANNOUNCE: nooff.parseInterfaceAnnounceMessage, + } +} diff --git a/vendor/golang.org/x/net/route/syscall.go b/vendor/golang.org/x/net/route/syscall.go new file mode 100644 index 00000000000..d136325a30a --- /dev/null +++ b/vendor/golang.org/x/net/route/syscall.go @@ -0,0 +1,33 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "syscall" + "unsafe" +) + +// TODO: replace with runtime.KeepAlive when available +//go:noescape +func keepAlive(p unsafe.Pointer) + +var zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var p unsafe.Pointer + if len(mib) > 0 { + p = unsafe.Pointer(&mib[0]) + } else { + p = unsafe.Pointer(&zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + keepAlive(p) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/route/syscall.s b/vendor/golang.org/x/net/route/syscall.s new file mode 100644 index 00000000000..fa6297f0aa1 --- /dev/null +++ b/vendor/golang.org/x/net/route/syscall.s @@ -0,0 +1,8 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
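The raw sysctl wrapper above is normally driven with the usual two-pass pattern: call once with a nil buffer so the kernel reports the required length, allocate, then call again to fill the buffer. A minimal sketch of that pattern, parameterized over a sysctl-shaped function so it compiles on any platform; fetchRIB and sysctlFn are illustrative names under that assumption, not the package's actual API.

package main

import "fmt"

// sysctlFn mirrors the signature of the unexported sysctl wrapper above.
type sysctlFn func(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error

// fetchRIB asks the kernel for the size of a routing information base,
// allocates a buffer of that size, then fetches the data into it.
func fetchRIB(sysctl sysctlFn, mib []int32) ([]byte, error) {
	var n uintptr
	// First pass: nil buffer; the kernel writes the required length into n.
	if err := sysctl(mib, nil, &n, nil, 0); err != nil {
		return nil, err
	}
	if n == 0 {
		return nil, nil
	}
	b := make([]byte, n)
	// Second pass: fill the buffer; n is updated to the bytes actually written.
	if err := sysctl(mib, &b[0], &n, nil, 0); err != nil {
		return nil, err
	}
	return b[:n], nil
}

func main() {
	// A fake sysctl that reports a 4-byte payload and writes nothing into it.
	fake := func(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
		*oldlen = 4
		return nil
	}
	// MIB values taken from the constants in this package: CTL_NET, AF_ROUTE, NET_RT_IFLIST.
	b, err := fetchRIB(fake, []int32{4, 17, 0, 0, 3, 0})
	fmt.Println(len(b), err) // 4 <nil>
}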
+ +#include "textflag.h" + +TEXT ·keepAlive(SB),NOSPLIT,$0 + RET diff --git a/vendor/golang.org/x/net/route/zsys_darwin.go b/vendor/golang.org/x/net/route/zsys_darwin.go new file mode 100644 index 00000000000..265b81cd50e --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_darwin.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1e + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STAT = 0x4 + sysNET_RT_TRASH = 0x5 + sysNET_RT_IFLIST2 = 0x6 + sysNET_RT_DUMP2 = 0x7 + sysNET_RT_MAXID = 0xa +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_MAXID = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFINFO2 = 0x12 + sysRTM_NEWMADDR2 = 0x13 + sysRTM_GET2 = 0x14 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrDarwin15 = 0x70 + sizeofIfaMsghdrDarwin15 = 0x14 + sizeofIfmaMsghdrDarwin15 = 0x10 + sizeofIfMsghdr2Darwin15 = 0xa0 + sizeofIfmaMsghdr2Darwin15 = 0x14 + sizeofIfDataDarwin15 = 0x60 + sizeofIfData64Darwin15 = 0x80 + + sizeofRtMsghdrDarwin15 = 0x5c + sizeofRtMsghdr2Darwin15 = 0x5c + sizeofRtMetricsDarwin15 = 0x38 +) diff --git a/vendor/golang.org/x/net/route/zsys_dragonfly.go b/vendor/golang.org/x/net/route/zsys_dragonfly.go new file mode 100644 index 00000000000..dd36dece0f6 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_dragonfly.go @@ -0,0 +1,92 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_MAXID = 0x4 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 + sysCTL_LWKT = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x6 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + 
sysRTA_BRD = 0x80 + sysRTA_MPLS1 = 0x100 + sysRTA_MPLS2 = 0x200 + sysRTA_MPLS3 = 0x400 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MPLS1 = 0x8 + sysRTAX_MPLS2 = 0x9 + sysRTAX_MPLS3 = 0xa + sysRTAX_MAX = 0xb +) + +const ( + sizeofIfMsghdrDragonFlyBSD4 = 0xb0 + sizeofIfaMsghdrDragonFlyBSD4 = 0x14 + sizeofIfmaMsghdrDragonFlyBSD4 = 0x10 + sizeofIfAnnouncemsghdrDragonFlyBSD4 = 0x18 + + sizeofRtMsghdrDragonFlyBSD4 = 0x98 + sizeofRtMetricsDragonFlyBSD4 = 0x70 +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_386.go b/vendor/golang.org/x/net/route/zsys_freebsd_386.go new file mode 100644 index 00000000000..9bac2e3900f --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_386.go @@ -0,0 +1,120 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x60 + sizeofIfMsghdrFreeBSD8 = 0x60 + sizeofIfMsghdrFreeBSD9 = 0x60 + sizeofIfMsghdrFreeBSD10 = 0x64 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x50 + sizeofIfDataFreeBSD8 = 0x50 + sizeofIfDataFreeBSD9 = 0x50 + sizeofIfDataFreeBSD10 = 0x54 + sizeofIfDataFreeBSD11 = 0x98 + + // MODIFIED BY HAND FOR 386 EMULATION ON AMD64 + // 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 +) diff --git 
a/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go new file mode 100644 index 00000000000..b1920d7ac16 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go @@ -0,0 +1,117 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0xb0 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0xb0 + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x98 + sizeofRtMetricsFreeBSD10 = 0x70 + + sizeofIfMsghdrFreeBSD7 = 0xa8 + sizeofIfMsghdrFreeBSD8 = 0xa8 + sizeofIfMsghdrFreeBSD9 = 0xa8 + sizeofIfMsghdrFreeBSD10 = 0xa8 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x98 + sizeofIfDataFreeBSD8 = 0x98 + sizeofIfDataFreeBSD9 = 0x98 + sizeofIfDataFreeBSD10 = 0x98 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_arm.go b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go new file mode 100644 index 00000000000..a034d6fcbf2 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go @@ -0,0 +1,117 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 
0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x70 + sizeofIfMsghdrFreeBSD8 = 0x70 + sizeofIfMsghdrFreeBSD9 = 0x70 + sizeofIfMsghdrFreeBSD10 = 0x70 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x60 + sizeofIfDataFreeBSD8 = 0x60 + sizeofIfDataFreeBSD9 = 0x60 + sizeofIfDataFreeBSD10 = 0x60 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0x68 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0x6c + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x5c + sizeofRtMetricsFreeBSD10Emu = 0x38 + + sizeofIfMsghdrFreeBSD7Emu = 0x70 + sizeofIfMsghdrFreeBSD8Emu = 0x70 + sizeofIfMsghdrFreeBSD9Emu = 0x70 + sizeofIfMsghdrFreeBSD10Emu = 0x70 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x60 + sizeofIfDataFreeBSD8Emu = 0x60 + sizeofIfDataFreeBSD9Emu = 0x60 + sizeofIfDataFreeBSD10Emu = 0x60 + sizeofIfDataFreeBSD11Emu = 0x98 +) diff --git a/vendor/golang.org/x/net/route/zsys_netbsd.go b/vendor/golang.org/x/net/route/zsys_netbsd.go new file mode 100644 index 00000000000..aa4aad1613e --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_netbsd.go @@ -0,0 +1,91 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x22 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x5 + sysNET_RT_MAXID = 0x6 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_DDB = 0x9 + sysCTL_PROC = 0xa + sysCTL_VENDOR = 0xb + sysCTL_EMUL = 0xc + sysCTL_SECURITY = 0xd + sysCTL_MAXID = 0xe +) + +const ( + sysRTM_VERSION = 0x4 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFANNOUNCE = 0x10 + sysRTM_IEEE80211 = 0x11 + sysRTM_SETGATE = 0x12 + sysRTM_LLINFO_UPD = 0x13 + sysRTM_IFINFO = 0x14 + sysRTM_CHGADDR = 0x15 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + 
sysRTA_BRD = 0x80 + sysRTA_TAG = 0x100 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_TAG = 0x8 + sysRTAX_MAX = 0x9 +) + +const ( + sizeofIfMsghdrNetBSD7 = 0x98 + sizeofIfaMsghdrNetBSD7 = 0x18 + sizeofIfAnnouncemsghdrNetBSD7 = 0x18 + + sizeofRtMsghdrNetBSD7 = 0x78 + sizeofRtMetricsNetBSD7 = 0x50 +) diff --git a/vendor/golang.org/x/net/route/zsys_openbsd.go b/vendor/golang.org/x/net/route/zsys_openbsd.go new file mode 100644 index 00000000000..4fadc4e8fa1 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_openbsd.go @@ -0,0 +1,80 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STATS = 0x4 + sysNET_RT_TABLE = 0x5 + sysNET_RT_IFNAMES = 0x6 + sysNET_RT_MAXID = 0x7 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_FS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_DDB = 0x9 + sysCTL_VFS = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_IFANNOUNCE = 0xf + sysRTM_DESYNC = 0x10 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_SRC = 0x100 + sysRTA_SRCMASK = 0x200 + sysRTA_LABEL = 0x400 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_SRC = 0x8 + sysRTAX_SRCMASK = 0x9 + sysRTAX_LABEL = 0xa + sysRTAX_MAX = 0xb +) diff --git a/vendor/golang.org/x/net/trace/histogram_test.go b/vendor/golang.org/x/net/trace/histogram_test.go new file mode 100644 index 00000000000..d384b9332d9 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram_test.go @@ -0,0 +1,325 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package trace + +import ( + "math" + "testing" +) + +type sumTest struct { + value int64 + sum int64 + sumOfSquares float64 + total int64 +} + +var sumTests = []sumTest{ + {100, 100, 10000, 1}, + {50, 150, 12500, 2}, + {50, 200, 15000, 3}, + {50, 250, 17500, 4}, +} + +type bucketingTest struct { + in int64 + log int + bucket int +} + +var bucketingTests = []bucketingTest{ + {0, 0, 0}, + {1, 1, 0}, + {2, 2, 1}, + {3, 2, 1}, + {4, 3, 2}, + {1000, 10, 9}, + {1023, 10, 9}, + {1024, 11, 10}, + {1000000, 20, 19}, +} + +type multiplyTest struct { + in int64 + ratio float64 + expectedSum int64 + expectedTotal int64 + expectedSumOfSquares float64 +} + +var multiplyTests = []multiplyTest{ + {15, 2.5, 37, 2, 562.5}, + {128, 4.6, 758, 13, 77953.9}, +} + +type percentileTest struct { + fraction float64 + expected int64 +} + +var percentileTests = []percentileTest{ + {0.25, 48}, + {0.5, 96}, + {0.6, 109}, + {0.75, 128}, + {0.90, 205}, + {0.95, 230}, + {0.99, 256}, +} + +func TestSum(t *testing.T) { + var h histogram + + for _, test := range sumTests { + h.addMeasurement(test.value) + sum := h.sum + if sum != test.sum { + t.Errorf("h.Sum = %v WANT: %v", sum, test.sum) + } + + sumOfSquares := h.sumOfSquares + if sumOfSquares != test.sumOfSquares { + t.Errorf("h.SumOfSquares = %v WANT: %v", sumOfSquares, test.sumOfSquares) + } + + total := h.total() + if total != test.total { + t.Errorf("h.Total = %v WANT: %v", total, test.total) + } + } +} + +func TestMultiply(t *testing.T) { + var h histogram + for i, test := range multiplyTests { + h.addMeasurement(test.in) + h.Multiply(test.ratio) + if h.sum != test.expectedSum { + t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum) + } + if h.total() != test.expectedTotal { + t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal) + } + if h.sumOfSquares != test.expectedSumOfSquares { + t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, test.expectedSumOfSquares, h.sumOfSquares) + } + } +} + +func TestBucketingFunctions(t *testing.T) { + for _, test := range bucketingTests { + log := log2(test.in) + if log != test.log { + t.Errorf("log2 = %v WANT: %v", log, test.log) + } + + bucket := getBucket(test.in) + if bucket != test.bucket { + t.Errorf("getBucket = %v WANT: %v", bucket, test.bucket) + } + } +} + +func TestAverage(t *testing.T) { + a := new(histogram) + average := a.average() + if average != 0 { + t.Errorf("Average of empty histogram was %v WANT: 0", average) + } + + a.addMeasurement(1) + a.addMeasurement(1) + a.addMeasurement(3) + const expected = float64(5) / float64(3) + average = a.average() + + if !isApproximate(average, expected) { + t.Errorf("Average = %g WANT: %v", average, expected) + } +} + +func TestStandardDeviation(t *testing.T) { + a := new(histogram) + add(a, 10, 1<<4) + add(a, 10, 1<<5) + add(a, 10, 1<<6) + stdDev := a.standardDeviation() + const expected = 19.95 + + if !isApproximate(stdDev, expected) { + t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected) + } + + // No values + a = new(histogram) + stdDev = a.standardDeviation() + + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } + + add(a, 1, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } + + add(a, 10, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } +} + +func TestPercentileBoundary(t *testing.T) { + a := new(histogram) + add(a, 5, 1<<4) + add(a, 10, 1<<6) + add(a, 5, 1<<7) + + for _, test := range percentileTests { + 
percentile := a.percentileBoundary(test.fraction) + if percentile != test.expected { + t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", test.fraction, percentile, test.expected) + } + } +} + +func TestCopyFrom(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1} + + a.CopyFrom(&b) + + if a.String() != b.String() { + t.Errorf("a.String = %s WANT: %s", a.String(), b.String()) + } +} + +func TestClear(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + + a.Clear() + + expected := "0, 0.000000, 0, 0, []" + if a.String() != expected { + t.Errorf("a.String = %s WANT %s", a.String(), expected) + } +} + +func TestNew(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := "0, 0.000000, 0, 0, []" + if b.(*histogram).String() != expected { + t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected) + } +} + +func TestAdd(t *testing.T) { + // The tests here depend on the associativity of addMeasurement and Add. + // Add empty observation + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := a.String() + a.Add(b) + if a.String() != expected { + t.Errorf("a.String = %s WANT: %s", a.String(), expected) + } + + // Add same bucketed value, no new buckets + c := new(histogram) + d := new(histogram) + e := new(histogram) + c.addMeasurement(12) + d.addMeasurement(11) + e.addMeasurement(12) + e.addMeasurement(11) + c.Add(d) + if c.String() != e.String() { + t.Errorf("c.String = %s WANT: %s", c.String(), e.String()) + } + + // Add bucketed values + f := new(histogram) + g := new(histogram) + h := new(histogram) + f.addMeasurement(4) + f.addMeasurement(12) + f.addMeasurement(100) + g.addMeasurement(18) + g.addMeasurement(36) + g.addMeasurement(255) + h.addMeasurement(4) + h.addMeasurement(12) + h.addMeasurement(100) + h.addMeasurement(18) + h.addMeasurement(36) + h.addMeasurement(255) + f.Add(g) + if f.String() != h.String() { + t.Errorf("f.String = %q WANT: %q", f.String(), h.String()) + } + + // add buckets to no buckets + i := new(histogram) + j := new(histogram) + k := new(histogram) + j.addMeasurement(18) + j.addMeasurement(36) + j.addMeasurement(255) + k.addMeasurement(18) + k.addMeasurement(36) + k.addMeasurement(255) + i.Add(j) + if i.String() != k.String() { + t.Errorf("i.String = %q WANT: %q", i.String(), k.String()) + } + + // add buckets to single value (no overlap) + l := new(histogram) + m := new(histogram) + n := new(histogram) + l.addMeasurement(0) + m.addMeasurement(18) + m.addMeasurement(36) + m.addMeasurement(255) + n.addMeasurement(0) + n.addMeasurement(18) + n.addMeasurement(36) + n.addMeasurement(255) + l.Add(m) + if l.String() != n.String() { + t.Errorf("l.String = %q WANT: %q", l.String(), n.String()) + } + + // mixed order + o := 
new(histogram) + p := new(histogram) + o.addMeasurement(0) + o.addMeasurement(2) + o.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(2) + if o.String() != p.String() { + t.Errorf("o.String = %q WANT: %q", o.String(), p.String()) + } +} + +func add(h *histogram, times int, val int64) { + for i := 0; i < times; i++ { + h.addMeasurement(val) + } +} + +func isApproximate(x, y float64) bool { + return math.Abs(x-y) < 1e-2 +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go index c44cb7ec9e7..d860fccf9bd 100644 --- a/vendor/golang.org/x/net/trace/trace.go +++ b/vendor/golang.org/x/net/trace/trace.go @@ -93,13 +93,17 @@ var DebugUseAfterFinish = false // // AuthRequest may be replaced by a program to customise its authorisation requirements. // -// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1]. +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. host, _, err := net.SplitHostPort(req.RemoteAddr) - switch { - case err != nil: // Badly formed address; fail closed. - return false, false - case host == "localhost" || host == "127.0.0.1" || host == "::1": + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": return true, true default: return false, false @@ -113,6 +117,7 @@ func init() { http.Error(w, "not allowed", http.StatusUnauthorized) return } + w.Header().Set("Content-Type", "text/html; charset=utf-8") Render(w, req, sensitive) }) http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { @@ -121,6 +126,7 @@ func init() { http.Error(w, "not allowed", http.StatusUnauthorized) return } + w.Header().Set("Content-Type", "text/html; charset=utf-8") RenderEvents(w, req, sensitive) }) } @@ -172,7 +178,7 @@ func Render(w io.Writer, req *http.Request, sensitive bool) { completedMu.RLock() data.Families = make([]string, 0, len(completedTraces)) - for fam, _ := range completedTraces { + for fam := range completedTraces { data.Families = append(data.Families, fam) } completedMu.RUnlock() diff --git a/vendor/golang.org/x/net/trace/trace_test.go b/vendor/golang.org/x/net/trace/trace_test.go new file mode 100644 index 00000000000..14d7c237aac --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_test.go @@ -0,0 +1,71 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "net/http" + "reflect" + "testing" +) + +type s struct{} + +func (s) String() string { return "lazy string" } + +// TestReset checks whether all the fields are zeroed after reset. +func TestReset(t *testing.T) { + tr := New("foo", "bar") + tr.LazyLog(s{}, false) + tr.LazyPrintf("%d", 1) + tr.SetRecycler(func(_ interface{}) {}) + tr.SetTraceInfo(3, 4) + tr.SetMaxEvents(100) + tr.SetError() + tr.Finish() + + tr.(*trace).reset() + + if !reflect.DeepEqual(tr, new(trace)) { + t.Errorf("reset didn't clear all fields: %+v", tr) + } +} + +// TestResetLog checks whether all the fields are zeroed after reset. 
+func TestResetLog(t *testing.T) { + el := NewEventLog("foo", "bar") + el.Printf("message") + el.Errorf("error") + el.Finish() + + el.(*eventLog).reset() + + if !reflect.DeepEqual(el, new(eventLog)) { + t.Errorf("reset didn't clear all fields: %+v", el) + } +} + +func TestAuthRequest(t *testing.T) { + testCases := []struct { + host string + want bool + }{ + {host: "192.168.23.1", want: false}, + {host: "192.168.23.1:8080", want: false}, + {host: "malformed remote addr", want: false}, + {host: "localhost", want: true}, + {host: "localhost:8080", want: true}, + {host: "127.0.0.1", want: true}, + {host: "127.0.0.1:8080", want: true}, + {host: "::1", want: true}, + {host: "[::1]:8080", want: true}, + } + for _, tt := range testCases { + req := &http.Request{RemoteAddr: tt.host} + any, sensitive := AuthRequest(req) + if any != tt.want || sensitive != tt.want { + t.Errorf("AuthRequest(%q) = %t, %t; want %t, %t", tt.host, any, sensitive, tt.want, tt.want) + } + } +} diff --git a/vendor/golang.org/x/net/webdav/file.go b/vendor/golang.org/x/net/webdav/file.go new file mode 100644 index 00000000000..3d95c6cba3e --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file.go @@ -0,0 +1,794 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "io" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "sync" + "time" +) + +// slashClean is equivalent to but slightly more efficient than +// path.Clean("/" + name). +func slashClean(name string) string { + if name == "" || name[0] != '/' { + name = "/" + name + } + return path.Clean(name) +} + +// A FileSystem implements access to a collection of named files. The elements +// in a file path are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +// +// Each method has the same semantics as the os package's function of the same +// name. +// +// Note that the os.Rename documentation says that "OS-specific restrictions +// might apply". In particular, whether or not renaming a file or directory +// overwriting another existing file or directory is an error is OS-dependent. +type FileSystem interface { + Mkdir(name string, perm os.FileMode) error + OpenFile(name string, flag int, perm os.FileMode) (File, error) + RemoveAll(name string) error + Rename(oldName, newName string) error + Stat(name string) (os.FileInfo, error) +} + +// A File is returned by a FileSystem's OpenFile method and can be served by a +// Handler. +// +// A File may optionally implement the DeadPropsHolder interface, if it can +// load and save dead properties. +type File interface { + http.File + io.Writer +} + +// A Dir implements FileSystem using the native file system restricted to a +// specific directory tree. +// +// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's +// string value is a filename on the native file system, not a URL, so it is +// separated by filepath.Separator, which isn't necessarily '/'. +// +// An empty Dir is treated as ".". +type Dir string + +func (d Dir) resolve(name string) string { + // This implementation is based on Dir.Open's code in the standard net/http package. + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return "" + } + dir := string(d) + if dir == "" { + dir = "." 
+ } + return filepath.Join(dir, filepath.FromSlash(slashClean(name))) +} + +func (d Dir) Mkdir(name string, perm os.FileMode) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + return os.Mkdir(name, perm) +} + +func (d Dir) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + f, err := os.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + return f, nil +} + +func (d Dir) RemoveAll(name string) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + if name == filepath.Clean(string(d)) { + // Prohibit removing the virtual root directory. + return os.ErrInvalid + } + return os.RemoveAll(name) +} + +func (d Dir) Rename(oldName, newName string) error { + if oldName = d.resolve(oldName); oldName == "" { + return os.ErrNotExist + } + if newName = d.resolve(newName); newName == "" { + return os.ErrNotExist + } + if root := filepath.Clean(string(d)); root == oldName || root == newName { + // Prohibit renaming from or to the virtual root directory. + return os.ErrInvalid + } + return os.Rename(oldName, newName) +} + +func (d Dir) Stat(name string) (os.FileInfo, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + return os.Stat(name) +} + +// NewMemFS returns a new in-memory FileSystem implementation. +func NewMemFS() FileSystem { + return &memFS{ + root: memFSNode{ + children: make(map[string]*memFSNode), + mode: 0660 | os.ModeDir, + modTime: time.Now(), + }, + } +} + +// A memFS implements FileSystem, storing all metadata and actual file data +// in-memory. No limits on filesystem size are used, so it is not recommended +// this be used where the clients are untrusted. +// +// Concurrent access is permitted. The tree structure is protected by a mutex, +// and each node's contents and metadata are protected by a per-node mutex. +// +// TODO: Enforce file permissions. +type memFS struct { + mu sync.Mutex + root memFSNode +} + +// TODO: clean up and rationalize the walk/find code. + +// walk walks the directory tree for the fullname, calling f at each step. If f +// returns an error, the walk will be aborted and return that same error. +// +// dir is the directory at that step, frag is the name fragment, and final is +// whether it is the final step. For example, walking "/foo/bar/x" will result +// in 3 calls to f: +// - "/", "foo", false +// - "/foo/", "bar", false +// - "/foo/bar/", "x", true +// The frag argument will be empty only if dir is the root node and the walk +// ends at that root node. +func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error { + original := fullname + fullname = slashClean(fullname) + + // Strip any leading "/"s to make fullname a relative path, as the walk + // starts at fs.root. 
+ if fullname[0] == '/' { + fullname = fullname[1:] + } + dir := &fs.root + + for { + frag, remaining := fullname, "" + i := strings.IndexRune(fullname, '/') + final := i < 0 + if !final { + frag, remaining = fullname[:i], fullname[i+1:] + } + if frag == "" && dir != &fs.root { + panic("webdav: empty path fragment for a clean path") + } + if err := f(dir, frag, final); err != nil { + return &os.PathError{ + Op: op, + Path: original, + Err: err, + } + } + if final { + break + } + child := dir.children[frag] + if child == nil { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrNotExist, + } + } + if !child.mode.IsDir() { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrInvalid, + } + } + dir, fullname = child, remaining + } + return nil +} + +// find returns the parent of the named node and the relative name fragment +// from the parent to the child. For example, if finding "/foo/bar/baz" then +// parent will be the node for "/foo/bar" and frag will be "baz". +// +// If the fullname names the root node, then parent, frag and err will be zero. +// +// find returns an error if the parent does not already exist or the parent +// isn't a directory, but it will not return an error per se if the child does +// not already exist. The error returned is either nil or an *os.PathError +// whose Op is op. +func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) { + err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error { + if !final { + return nil + } + if frag0 != "" { + parent, frag = parent0, frag0 + } + return nil + }) + return parent, frag, err +} + +func (fs *memFS) Mkdir(name string, perm os.FileMode) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("mkdir", name) + if err != nil { + return err + } + if dir == nil { + // We can't create the root. + return os.ErrInvalid + } + if _, ok := dir.children[frag]; ok { + return os.ErrExist + } + dir.children[frag] = &memFSNode{ + children: make(map[string]*memFSNode), + mode: perm.Perm() | os.ModeDir, + modTime: time.Now(), + } + return nil +} + +func (fs *memFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("open", name) + if err != nil { + return nil, err + } + var n *memFSNode + if dir == nil { + // We're opening the root. + if flag&(os.O_WRONLY|os.O_RDWR) != 0 { + return nil, os.ErrPermission + } + n, frag = &fs.root, "/" + + } else { + n = dir.children[frag] + if flag&(os.O_SYNC|os.O_APPEND) != 0 { + // memFile doesn't support these flags yet. + return nil, os.ErrInvalid + } + if flag&os.O_CREATE != 0 { + if flag&os.O_EXCL != 0 && n != nil { + return nil, os.ErrExist + } + if n == nil { + n = &memFSNode{ + mode: perm.Perm(), + } + dir.children[frag] = n + } + } + if n == nil { + return nil, os.ErrNotExist + } + if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 { + n.mu.Lock() + n.data = nil + n.mu.Unlock() + } + } + + children := make([]os.FileInfo, 0, len(n.children)) + for cName, c := range n.children { + children = append(children, c.stat(cName)) + } + return &memFile{ + n: n, + nameSnapshot: frag, + childrenSnapshot: children, + }, nil +} + +func (fs *memFS) RemoveAll(name string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("remove", name) + if err != nil { + return err + } + if dir == nil { + // We can't remove the root. 
+ return os.ErrInvalid + } + delete(dir.children, frag) + return nil +} + +func (fs *memFS) Rename(oldName, newName string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + oldName = slashClean(oldName) + newName = slashClean(newName) + if oldName == newName { + return nil + } + if strings.HasPrefix(newName, oldName+"/") { + // We can't rename oldName to be a sub-directory of itself. + return os.ErrInvalid + } + + oDir, oFrag, err := fs.find("rename", oldName) + if err != nil { + return err + } + if oDir == nil { + // We can't rename from the root. + return os.ErrInvalid + } + + nDir, nFrag, err := fs.find("rename", newName) + if err != nil { + return err + } + if nDir == nil { + // We can't rename to the root. + return os.ErrInvalid + } + + oNode, ok := oDir.children[oFrag] + if !ok { + return os.ErrNotExist + } + if oNode.children != nil { + if nNode, ok := nDir.children[nFrag]; ok { + if nNode.children == nil { + return errNotADirectory + } + if len(nNode.children) != 0 { + return errDirectoryNotEmpty + } + } + } + delete(oDir.children, oFrag) + nDir.children[nFrag] = oNode + return nil +} + +func (fs *memFS) Stat(name string) (os.FileInfo, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("stat", name) + if err != nil { + return nil, err + } + if dir == nil { + // We're stat'ting the root. + return fs.root.stat("/"), nil + } + if n, ok := dir.children[frag]; ok { + return n.stat(path.Base(name)), nil + } + return nil, os.ErrNotExist +} + +// A memFSNode represents a single entry in the in-memory filesystem and also +// implements os.FileInfo. +type memFSNode struct { + // children is protected by memFS.mu. + children map[string]*memFSNode + + mu sync.Mutex + data []byte + mode os.FileMode + modTime time.Time + deadProps map[xml.Name]Property +} + +func (n *memFSNode) stat(name string) *memFileInfo { + n.mu.Lock() + defer n.mu.Unlock() + return &memFileInfo{ + name: name, + size: int64(len(n.data)), + mode: n.mode, + modTime: n.modTime, + } +} + +func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) { + n.mu.Lock() + defer n.mu.Unlock() + if len(n.deadProps) == 0 { + return nil, nil + } + ret := make(map[xml.Name]Property, len(n.deadProps)) + for k, v := range n.deadProps { + ret[k] = v + } + return ret, nil +} + +func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) { + n.mu.Lock() + defer n.mu.Unlock() + pstat := Propstat{Status: http.StatusOK} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + if patch.Remove { + delete(n.deadProps, p.XMLName) + continue + } + if n.deadProps == nil { + n.deadProps = map[xml.Name]Property{} + } + n.deadProps[p.XMLName] = p + } + } + return []Propstat{pstat}, nil +} + +type memFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (f *memFileInfo) Name() string { return f.name } +func (f *memFileInfo) Size() int64 { return f.size } +func (f *memFileInfo) Mode() os.FileMode { return f.mode } +func (f *memFileInfo) ModTime() time.Time { return f.modTime } +func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() } +func (f *memFileInfo) Sys() interface{} { return nil } + +// A memFile is a File implementation for a memFSNode. It is a per-file (not +// per-node) read/write position, and a snapshot of the memFS' tree structure +// (a node's name and children) for that node. 
+type memFile struct { + n *memFSNode + nameSnapshot string + childrenSnapshot []os.FileInfo + // pos is protected by n.mu. + pos int +} + +// A *memFile implements the optional DeadPropsHolder interface. +var _ DeadPropsHolder = (*memFile)(nil) + +func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() } +func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) } + +func (f *memFile) Close() error { + return nil +} + +func (f *memFile) Read(p []byte) (int, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos >= len(f.n.data) { + return 0, io.EOF + } + n := copy(p, f.n.data[f.pos:]) + f.pos += n + return n, nil +} + +func (f *memFile) Readdir(count int) ([]os.FileInfo, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if !f.n.mode.IsDir() { + return nil, os.ErrInvalid + } + old := f.pos + if old >= len(f.childrenSnapshot) { + // The os.File Readdir docs say that at the end of a directory, + // the error is io.EOF if count > 0 and nil if count <= 0. + if count > 0 { + return nil, io.EOF + } + return nil, nil + } + if count > 0 { + f.pos += count + if f.pos > len(f.childrenSnapshot) { + f.pos = len(f.childrenSnapshot) + } + } else { + f.pos = len(f.childrenSnapshot) + old = 0 + } + return f.childrenSnapshot[old:f.pos], nil +} + +func (f *memFile) Seek(offset int64, whence int) (int64, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + npos := f.pos + // TODO: How to handle offsets greater than the size of system int? + switch whence { + case os.SEEK_SET: + npos = int(offset) + case os.SEEK_CUR: + npos += int(offset) + case os.SEEK_END: + npos = len(f.n.data) + int(offset) + default: + npos = -1 + } + if npos < 0 { + return 0, os.ErrInvalid + } + f.pos = npos + return int64(f.pos), nil +} + +func (f *memFile) Stat() (os.FileInfo, error) { + return f.n.stat(f.nameSnapshot), nil +} + +func (f *memFile) Write(p []byte) (int, error) { + lenp := len(p) + f.n.mu.Lock() + defer f.n.mu.Unlock() + + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos < len(f.n.data) { + n := copy(f.n.data[f.pos:], p) + f.pos += n + p = p[n:] + } else if f.pos > len(f.n.data) { + // Write permits the creation of holes, if we've seek'ed past the + // existing end of file. + if f.pos <= cap(f.n.data) { + oldLen := len(f.n.data) + f.n.data = f.n.data[:f.pos] + hole := f.n.data[oldLen:] + for i := range hole { + hole[i] = 0 + } + } else { + d := make([]byte, f.pos, f.pos+len(p)) + copy(d, f.n.data) + f.n.data = d + } + } + + if len(p) > 0 { + // We should only get here if f.pos == len(f.n.data). + f.n.data = append(f.n.data, p...) + f.pos = len(f.n.data) + } + f.n.modTime = time.Now() + return lenp, nil +} + +// moveFiles moves files and/or directories from src to dst. +// +// See section 9.9.4 for when various HTTP status codes apply. +func moveFiles(fs FileSystem, src, dst string, overwrite bool) (status int, err error) { + created := false + if _, err := fs.Stat(dst); err != nil { + if !os.IsNotExist(err) { + return http.StatusForbidden, err + } + created = true + } else if overwrite { + // Section 9.9.3 says that "If a resource exists at the destination + // and the Overwrite header is "T", then prior to performing the move, + // the server must perform a DELETE with "Depth: infinity" on the + // destination resource. 
+ if err := fs.RemoveAll(dst); err != nil { + return http.StatusForbidden, err + } + } else { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.Rename(src, dst); err != nil { + return http.StatusForbidden, err + } + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +func copyProps(dst, src File) error { + d, ok := dst.(DeadPropsHolder) + if !ok { + return nil + } + s, ok := src.(DeadPropsHolder) + if !ok { + return nil + } + m, err := s.DeadProps() + if err != nil { + return err + } + props := make([]Property, 0, len(m)) + for _, prop := range m { + props = append(props, prop) + } + _, err = d.Patch([]Proppatch{{Props: props}}) + return err +} + +// copyFiles copies files and/or directories from src to dst. +// +// See section 9.8.5 for when various HTTP status codes apply. +func copyFiles(fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) { + if recursion == 1000 { + return http.StatusInternalServerError, errRecursionTooDeep + } + recursion++ + + // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/ + // into /A/B/ could lead to infinite recursion if not handled correctly." + + srcFile, err := fs.OpenFile(src, os.O_RDONLY, 0) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + defer srcFile.Close() + srcStat, err := srcFile.Stat() + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + srcPerm := srcStat.Mode() & os.ModePerm + + created := false + if _, err := fs.Stat(dst); err != nil { + if os.IsNotExist(err) { + created = true + } else { + return http.StatusForbidden, err + } + } else { + if !overwrite { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.RemoveAll(dst); err != nil && !os.IsNotExist(err) { + return http.StatusForbidden, err + } + } + + if srcStat.IsDir() { + if err := fs.Mkdir(dst, srcPerm); err != nil { + return http.StatusForbidden, err + } + if depth == infiniteDepth { + children, err := srcFile.Readdir(-1) + if err != nil { + return http.StatusForbidden, err + } + for _, c := range children { + name := c.Name() + s := path.Join(src, name) + d := path.Join(dst, name) + cStatus, cErr := copyFiles(fs, s, d, overwrite, depth, recursion) + if cErr != nil { + // TODO: MultiStatus. + return cStatus, cErr + } + } + } + + } else { + dstFile, err := fs.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm) + if err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusForbidden, err + + } + _, copyErr := io.Copy(dstFile, srcFile) + propsErr := copyProps(dstFile, srcFile) + closeErr := dstFile.Close() + if copyErr != nil { + return http.StatusInternalServerError, copyErr + } + if propsErr != nil { + return http.StatusInternalServerError, propsErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + } + + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +// walkFS traverses filesystem fs starting at name up to depth levels. +// +// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node, +// walkFS calls walkFn. If a visited file system node is a directory and +// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node. 
+func walkFS(fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error { + // This implementation is based on Walk's code in the standard path/filepath package. + err := walkFn(name, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + if !info.IsDir() || depth == 0 { + return nil + } + if depth == 1 { + depth = 0 + } + + // Read directory names. + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return walkFn(name, info, err) + } + fileInfos, err := f.Readdir(0) + f.Close() + if err != nil { + return walkFn(name, info, err) + } + + for _, fileInfo := range fileInfos { + filename := path.Join(name, fileInfo.Name()) + fileInfo, err := fs.Stat(filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walkFS(fs, depth, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/webdav/file_test.go b/vendor/golang.org/x/net/webdav/file_test.go new file mode 100644 index 00000000000..cbd0240ab18 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_test.go @@ -0,0 +1,1169 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "testing" +) + +func TestSlashClean(t *testing.T) { + testCases := []string{ + "", + ".", + "/", + "/./", + "//", + "//.", + "//a", + "/a", + "/a/b/c", + "/a//b/./../c/d/", + "a", + "a/b/c", + } + for _, tc := range testCases { + got := slashClean(tc) + want := path.Clean("/" + tc) + if got != want { + t.Errorf("tc=%q: got %q, want %q", tc, got, want) + } + } +} + +func TestDirResolve(t *testing.T) { + testCases := []struct { + dir, name, want string + }{ + {"/", "", "/"}, + {"/", "/", "/"}, + {"/", ".", "/"}, + {"/", "./a", "/a"}, + {"/", "..", "/"}, + {"/", "..", "/"}, + {"/", "../", "/"}, + {"/", "../.", "/"}, + {"/", "../a", "/a"}, + {"/", "../..", "/"}, + {"/", "../bar/a", "/bar/a"}, + {"/", "../baz/a", "/baz/a"}, + {"/", "...", "/..."}, + {"/", ".../a", "/.../a"}, + {"/", ".../..", "/"}, + {"/", "a", "/a"}, + {"/", "a/./b", "/a/b"}, + {"/", "a/../../b", "/b"}, + {"/", "a/../b", "/b"}, + {"/", "a/b", "/a/b"}, + {"/", "a/b/c/../../d", "/a/d"}, + {"/", "a/b/c/../../../d", "/d"}, + {"/", "a/b/c/../../../../d", "/d"}, + {"/", "a/b/c/d", "/a/b/c/d"}, + + {"/foo/bar", "", "/foo/bar"}, + {"/foo/bar", "/", "/foo/bar"}, + {"/foo/bar", ".", "/foo/bar"}, + {"/foo/bar", "./a", "/foo/bar/a"}, + {"/foo/bar", "..", "/foo/bar"}, + {"/foo/bar", "../", "/foo/bar"}, + {"/foo/bar", "../.", "/foo/bar"}, + {"/foo/bar", "../a", "/foo/bar/a"}, + {"/foo/bar", "../..", "/foo/bar"}, + {"/foo/bar", "../bar/a", "/foo/bar/bar/a"}, + {"/foo/bar", "../baz/a", "/foo/bar/baz/a"}, + {"/foo/bar", "...", "/foo/bar/..."}, + {"/foo/bar", ".../a", "/foo/bar/.../a"}, + {"/foo/bar", ".../..", "/foo/bar"}, + {"/foo/bar", "a", "/foo/bar/a"}, + {"/foo/bar", "a/./b", "/foo/bar/a/b"}, + {"/foo/bar", "a/../../b", "/foo/bar/b"}, + {"/foo/bar", "a/../b", "/foo/bar/b"}, + {"/foo/bar", "a/b", "/foo/bar/a/b"}, + {"/foo/bar", "a/b/c/../../d", "/foo/bar/a/d"}, + {"/foo/bar", "a/b/c/../../../d", "/foo/bar/d"}, + {"/foo/bar", 
"a/b/c/../../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/d", "/foo/bar/a/b/c/d"}, + + {"/foo/bar/", "", "/foo/bar"}, + {"/foo/bar/", "/", "/foo/bar"}, + {"/foo/bar/", ".", "/foo/bar"}, + {"/foo/bar/", "./a", "/foo/bar/a"}, + {"/foo/bar/", "..", "/foo/bar"}, + + {"/foo//bar///", "", "/foo/bar"}, + {"/foo//bar///", "/", "/foo/bar"}, + {"/foo//bar///", ".", "/foo/bar"}, + {"/foo//bar///", "./a", "/foo/bar/a"}, + {"/foo//bar///", "..", "/foo/bar"}, + + {"/x/y/z", "ab/c\x00d/ef", ""}, + + {".", "", "."}, + {".", "/", "."}, + {".", ".", "."}, + {".", "./a", "a"}, + {".", "..", "."}, + {".", "..", "."}, + {".", "../", "."}, + {".", "../.", "."}, + {".", "../a", "a"}, + {".", "../..", "."}, + {".", "../bar/a", "bar/a"}, + {".", "../baz/a", "baz/a"}, + {".", "...", "..."}, + {".", ".../a", ".../a"}, + {".", ".../..", "."}, + {".", "a", "a"}, + {".", "a/./b", "a/b"}, + {".", "a/../../b", "b"}, + {".", "a/../b", "b"}, + {".", "a/b", "a/b"}, + {".", "a/b/c/../../d", "a/d"}, + {".", "a/b/c/../../../d", "d"}, + {".", "a/b/c/../../../../d", "d"}, + {".", "a/b/c/d", "a/b/c/d"}, + + {"", "", "."}, + {"", "/", "."}, + {"", ".", "."}, + {"", "./a", "a"}, + {"", "..", "."}, + } + + for _, tc := range testCases { + d := Dir(filepath.FromSlash(tc.dir)) + if got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want { + t.Errorf("dir=%q, name=%q: got %q, want %q", tc.dir, tc.name, got, tc.want) + } + } +} + +func TestWalk(t *testing.T) { + type walkStep struct { + name, frag string + final bool + } + + testCases := []struct { + dir string + want []walkStep + }{ + {"", []walkStep{ + {"", "", true}, + }}, + {"/", []walkStep{ + {"", "", true}, + }}, + {"/a", []walkStep{ + {"", "a", true}, + }}, + {"/a/", []walkStep{ + {"", "a", true}, + }}, + {"/a/b", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/c", []walkStep{ + {"", "a", false}, + {"a", "b", false}, + {"b", "c", true}, + }}, + // The following test case is the one mentioned explicitly + // in the method description. + {"/foo/bar/x", []walkStep{ + {"", "foo", false}, + {"foo", "bar", false}, + {"bar", "x", true}, + }}, + } + + for _, tc := range testCases { + fs := NewMemFS().(*memFS) + + parts := strings.Split(tc.dir, "/") + for p := 2; p < len(parts); p++ { + d := strings.Join(parts[:p], "/") + if err := fs.Mkdir(d, 0666); err != nil { + t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err) + } + } + + i, prevFrag := 0, "" + err := fs.walk("test", tc.dir, func(dir *memFSNode, frag string, final bool) error { + got := walkStep{ + name: prevFrag, + frag: frag, + final: final, + } + want := tc.want[i] + + if got != want { + return fmt.Errorf("got %+v, want %+v", got, want) + } + i, prevFrag = i+1, frag + return nil + }) + if err != nil { + t.Errorf("tc.dir=%q: %v", tc.dir, err) + } + } +} + +// find appends to ss the names of the named file and its children. It is +// analogous to the Unix find command. +// +// The returned strings are not guaranteed to be in any particular order. 
+func find(ss []string, fs FileSystem, name string) ([]string, error) { + stat, err := fs.Stat(name) + if err != nil { + return nil, err + } + ss = append(ss, name) + if stat.IsDir() { + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + return nil, err + } + for _, c := range children { + ss, err = find(ss, fs, path.Join(name, c.Name())) + if err != nil { + return nil, err + } + } + } + return ss, nil +} + +func testFS(t *testing.T, fs FileSystem) { + errStr := func(err error) string { + switch { + case os.IsExist(err): + return "errExist" + case os.IsNotExist(err): + return "errNotExist" + case err != nil: + return "err" + } + return "ok" + } + + // The non-"find" non-"stat" test cases should change the file system state. The + // indentation of the "find"s and "stat"s helps distinguish such test cases. + testCases := []string{ + " stat / want dir", + " stat /a want errNotExist", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + "create /a A want ok", + " stat /a want 1", + "create /d/e EEE want errNotExist", + "mk-dir /a want errExist", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + " stat /d want dir", + "create /d/e EEE want ok", + " stat /d/e want 3", + " find / /a /d /d/e", + "create /d/f FFFF want ok", + "create /d/g GGGGGGG want ok", + "mk-dir /d/m want ok", + "mk-dir /d/m want errExist", + "create /d/m/p PPPPP want ok", + " stat /d/e want 3", + " stat /d/f want 4", + " stat /d/g want 7", + " stat /d/h want errNotExist", + " stat /d/m want dir", + " stat /d/m/p want 5", + " find / /a /d /d/e /d/f /d/g /d/m /d/m/p", + "rm-all /d want ok", + " stat /a want 1", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + " stat /d/f want errNotExist", + " stat /d/g want errNotExist", + " stat /d/m want errNotExist", + " stat /d/m/p want errNotExist", + " find / /a", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + "create /d/f FFFF want ok", + "rm-all /d/f want ok", + "mk-dir /d/m want ok", + "rm-all /z want ok", + "rm-all / want err", + "create /b BB want ok", + " stat / want dir", + " stat /a want 1", + " stat /b want 2", + " stat /c want errNotExist", + " stat /d want dir", + " stat /d/m want dir", + " find / /a /b /d /d/m", + "move__ o=F /b /c want ok", + " stat /b want errNotExist", + " stat /c want 2", + " stat /d/m want dir", + " stat /d/n want errNotExist", + " find / /a /c /d /d/m", + "move__ o=F /d/m /d/n want ok", + "create /d/n/q QQQQ want ok", + " stat /d/m want errNotExist", + " stat /d/n want dir", + " stat /d/n/q want 4", + "move__ o=F /d /d/n/z want err", + "move__ o=T /c /d/n/q want ok", + " stat /c want errNotExist", + " stat /d/n/q want 2", + " find / /a /d /d/n /d/n/q", + "create /d/n/r RRRRR want ok", + "mk-dir /u want ok", + "mk-dir /u/v want ok", + "move__ o=F /d/n /u want errExist", + "create /t TTTTTT want ok", + "move__ o=F /d/n /t want errExist", + "rm-all /t want ok", + "move__ o=F /d/n /t want ok", + " stat /d want dir", + " stat /d/n want errNotExist", + " stat /d/n/r want errNotExist", + " stat /t want dir", + " stat /t/q want 2", + " stat /t/r want 5", + " find / /a /d /t /t/q /t/r /u /u/v", + "move__ o=F /t / want errExist", + "move__ o=T /t /u/v want ok", + " stat /u/v/r want 5", + "move__ o=F / /z want err", + " find / /a /d /u /u/v /u/v/q /u/v/r", + " stat /a want 1", + " stat /b want errNotExist", + " stat /c want errNotExist", + " stat /u/v/r want 5", + "copy__ o=F d=0 /a /b want ok", + "copy__ o=T 
d=0 /a /c want ok", + " stat /a want 1", + " stat /b want 1", + " stat /c want 1", + " stat /u/v/r want 5", + "copy__ o=F d=0 /u/v/r /b want errExist", + " stat /b want 1", + "copy__ o=T d=0 /u/v/r /b want ok", + " stat /a want 1", + " stat /b want 5", + " stat /u/v/r want 5", + "rm-all /a want ok", + "rm-all /b want ok", + "mk-dir /u/v/w want ok", + "create /u/v/w/s SSSSSSSS want ok", + " stat /d want dir", + " stat /d/x want errNotExist", + " stat /d/y want errNotExist", + " stat /u/v/r want 5", + " stat /u/v/w/s want 8", + " find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s", + "copy__ o=T d=0 /u/v /d/x want ok", + "copy__ o=T d=∞ /u/v /d/y want ok", + "rm-all /u want ok", + " stat /d/x want dir", + " stat /d/x/q want errNotExist", + " stat /d/x/r want errNotExist", + " stat /d/x/w want errNotExist", + " stat /d/x/w/s want errNotExist", + " stat /d/y want dir", + " stat /d/y/q want 2", + " stat /d/y/r want 5", + " stat /d/y/w want dir", + " stat /d/y/w/s want 8", + " stat /u want errNotExist", + " find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s", + "copy__ o=F d=∞ /d/y /d/x want errExist", + } + + for i, tc := range testCases { + tc = strings.TrimSpace(tc) + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create": + parts := strings.Split(arg, " ") + if len(parts) != 4 || parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid write", i, tc) + } + f, opErr := fs.OpenFile(parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if got := errStr(opErr); got != parts[3] { + t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3]) + } + if f != nil { + if _, err := f.Write([]byte(parts[1])); err != nil { + t.Fatalf("test case #%d %q: Write: %v", i, tc, err) + } + if err := f.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + } + + case "find": + got, err := find(nil, fs, "/") + if err != nil { + t.Fatalf("test case #%d %q: find: %v", i, tc, err) + } + sort.Strings(got) + want := strings.Split(arg, " ") + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %s\nwant %s", i, tc, got, want) + } + + case "copy__", "mk-dir", "move__", "rm-all", "stat": + nParts := 3 + switch op { + case "copy__": + nParts = 6 + case "move__": + nParts = 5 + } + parts := strings.Split(arg, " ") + if len(parts) != nParts { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + + got, opErr := "", error(nil) + switch op { + case "copy__": + depth := 0 + if parts[1] == "d=∞" { + depth = infiniteDepth + } + _, opErr = copyFiles(fs, parts[2], parts[3], parts[0] == "o=T", depth, 0) + case "mk-dir": + opErr = fs.Mkdir(parts[0], 0777) + case "move__": + _, opErr = moveFiles(fs, parts[1], parts[2], parts[0] == "o=T") + case "rm-all": + opErr = fs.RemoveAll(parts[0]) + case "stat": + var stat os.FileInfo + fileName := parts[0] + if stat, opErr = fs.Stat(fileName); opErr == nil { + if stat.IsDir() { + got = "dir" + } else { + got = strconv.Itoa(int(stat.Size())) + } + + if fileName == "/" { + // For a Dir FileSystem, the virtual file system root maps to a + // real file system name like "/tmp/webdav-test012345", which does + // not end with "/". We skip such cases. 
+ } else if statName := stat.Name(); path.Base(fileName) != statName { + t.Fatalf("test case #%d %q: file name %q inconsistent with stat name %q", + i, tc, fileName, statName) + } + } + } + if got == "" { + got = errStr(opErr) + } + + if parts[len(parts)-2] != "want" { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + if want := parts[len(parts)-1]; got != want { + t.Fatalf("test case #%d %q: got %q (%v), want %q", i, tc, got, opErr, want) + } + } + } +} + +func TestDir(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skip("see golang.org/issue/12004") + case "plan9": + t.Skip("see golang.org/issue/11453") + } + + td, err := ioutil.TempDir("", "webdav-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + testFS(t, Dir(td)) +} + +func TestMemFS(t *testing.T) { + testFS(t, NewMemFS()) +} + +func TestMemFSRoot(t *testing.T) { + fs := NewMemFS() + for i := 0; i < 5; i++ { + stat, err := fs.Stat("/") + if err != nil { + t.Fatalf("i=%d: Stat: %v", i, err) + } + if !stat.IsDir() { + t.Fatalf("i=%d: Stat.IsDir is false, want true", i) + } + + f, err := fs.OpenFile("/", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("i=%d: OpenFile: %v", i, err) + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + t.Fatalf("i=%d: Readdir: %v", i, err) + } + if len(children) != i { + t.Fatalf("i=%d: got %d children, want %d", i, len(children), i) + } + + if _, err := f.Write(make([]byte, 1)); err == nil { + t.Fatalf("i=%d: Write: got nil error, want non-nil", i) + } + + if err := fs.Mkdir(fmt.Sprintf("/dir%d", i), 0777); err != nil { + t.Fatalf("i=%d: Mkdir: %v", i, err) + } + } +} + +func TestMemFileReaddir(t *testing.T) { + fs := NewMemFS() + if err := fs.Mkdir("/foo", 0777); err != nil { + t.Fatalf("Mkdir: %v", err) + } + readdir := func(count int) ([]os.FileInfo, error) { + f, err := fs.OpenFile("/foo", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + return f.Readdir(count) + } + if got, err := readdir(-1); len(got) != 0 || err != nil { + t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, ", len(got), err) + } + if got, err := readdir(+1); len(got) != 0 || err != io.EOF { + t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err) + } +} + +func TestMemFile(t *testing.T) { + testCases := []string{ + "wantData ", + "wantSize 0", + "write abc", + "wantData abc", + "write de", + "wantData abcde", + "wantSize 5", + "write 5*x", + "write 4*y+2*z", + "write 3*st", + "wantData abcdexxxxxyyyyzzststst", + "wantSize 22", + "seek set 4 want 4", + "write EFG", + "wantData abcdEFGxxxyyyyzzststst", + "wantSize 22", + "seek set 2 want 2", + "read cdEF", + "read Gx", + "seek cur 0 want 8", + "seek cur 2 want 10", + "seek cur -1 want 9", + "write J", + "wantData abcdEFGxxJyyyyzzststst", + "wantSize 22", + "seek cur -4 want 6", + "write ghijk", + "wantData abcdEFghijkyyyzzststst", + "wantSize 22", + "read yyyz", + "seek cur 0 want 15", + "write ", + "seek cur 0 want 15", + "read ", + "seek cur 0 want 15", + "seek end -3 want 19", + "write ZZ", + "wantData abcdEFghijkyyyzzstsZZt", + "wantSize 22", + "write 4*A", + "wantData abcdEFghijkyyyzzstsZZAAAA", + "wantSize 25", + "seek end 0 want 25", + "seek end -5 want 20", + "read Z+4*A", + "write 5*B", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB", + "wantSize 30", + "seek end 10 want 40", + "write C", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C", + "wantSize 41", + "write D", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD", + "wantSize 
42", + "seek set 43 want 43", + "write E", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E", + "wantSize 44", + "seek set 0 want 0", + "write 5*123456789_", + "wantData 123456789_123456789_123456789_123456789_123456789_", + "wantSize 50", + "seek cur 0 want 50", + "seek cur -99 want err", + } + + const filename = "/foo" + fs := NewMemFS() + f, err := fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + // Expand an arg like "3*a+2*b" to "aaabb". + parts := strings.Split(arg, "+") + for j, part := range parts { + if k := strings.IndexByte(part, '*'); k >= 0 { + repeatCount, repeatStr := part[:k], part[k+1:] + n, err := strconv.Atoi(repeatCount) + if err != nil { + t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount) + } + parts[j] = strings.Repeat(repeatStr, n) + } + } + arg = strings.Join(parts, "") + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "read": + buf := make([]byte, len(arg)) + if _, err := io.ReadFull(f, buf); err != nil { + t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err) + } + if got := string(buf); got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + + case "seek": + parts := strings.Split(arg, " ") + if len(parts) != 4 { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + + whence := 0 + switch parts[0] { + default: + t.Fatalf("test case #%d %q: invalid seek whence", i, tc) + case "set": + whence = os.SEEK_SET + case "cur": + whence = os.SEEK_CUR + case "end": + whence = os.SEEK_END + } + offset, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1]) + } + + if parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + if parts[3] == "err" { + _, err := f.Seek(int64(offset), whence) + if err == nil { + t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc) + } + } else { + got, err := f.Seek(int64(offset), whence) + if err != nil { + t.Fatalf("test case #%d %q: Seek: %v", i, tc, err) + } + want, err := strconv.Atoi(parts[3]) + if err != nil { + t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3]) + } + if got != int64(want) { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + + case "write": + n, err := f.Write([]byte(arg)) + if err != nil { + t.Fatalf("test case #%d %q: write: %v", i, tc, err) + } + if n != len(arg) { + t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg)) + } + + case "wantData": + g, err := fs.OpenFile(filename, os.O_RDONLY, 0666) + if err != nil { + t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err) + } + gotBytes, err := ioutil.ReadAll(g) + if err != nil { + t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err) + } + for i, c := range gotBytes { + if c == '\x00' { + gotBytes[i] = '.' 
+ } + } + got := string(gotBytes) + if got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + if err := g.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + + case "wantSize": + n, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg) + } + fi, err := fs.Stat(filename) + if err != nil { + t.Fatalf("test case #%d %q: Stat: %v", i, tc, err) + } + if got, want := fi.Size(), int64(n); got != want { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + } +} + +// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a +// memFile doesn't allocate a new buffer for each of those N times. Otherwise, +// calling io.Copy(aMemFile, src) is likely to have quadratic complexity. +func TestMemFileWriteAllocs(t *testing.T) { + if runtime.Compiler == "gccgo" { + t.Skip("gccgo allocates here") + } + fs := NewMemFS() + f, err := fs.OpenFile("/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + a := testing.AllocsPerRun(100, func() { + f.Write(xxx) + }) + // AllocsPerRun returns an integral value, so we compare the rounded-down + // number to zero. + if a > 0 { + t.Fatalf("%v allocs per run, want 0", a) + } +} + +func BenchmarkMemFileWrite(b *testing.B) { + fs := NewMemFS() + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f, err := fs.OpenFile("/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + b.Fatalf("OpenFile: %v", err) + } + for j := 0; j < 100; j++ { + f.Write(xxx) + } + if err := f.Close(); err != nil { + b.Fatalf("Close: %v", err) + } + if err := fs.RemoveAll("/xxx"); err != nil { + b.Fatalf("RemoveAll: %v", err) + } + } +} + +func TestCopyMoveProps(t *testing.T) { + fs := NewMemFS() + create := func(name string) error { + f, err := fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + _, wErr := f.Write([]byte("contents")) + cErr := f.Close() + if wErr != nil { + return wErr + } + return cErr + } + patch := func(name string, patches ...Proppatch) error { + f, err := fs.OpenFile(name, os.O_RDWR, 0666) + if err != nil { + return err + } + _, pErr := f.(DeadPropsHolder).Patch(patches) + cErr := f.Close() + if pErr != nil { + return pErr + } + return cErr + } + props := func(name string) (map[xml.Name]Property, error) { + f, err := fs.OpenFile(name, os.O_RDWR, 0666) + if err != nil { + return nil, err + } + m, pErr := f.(DeadPropsHolder).DeadProps() + cErr := f.Close() + if pErr != nil { + return nil, pErr + } + if cErr != nil { + return nil, cErr + } + return m, nil + } + + p0 := Property{ + XMLName: xml.Name{Space: "x:", Local: "boat"}, + InnerXML: []byte("pea-green"), + } + p1 := Property{ + XMLName: xml.Name{Space: "x:", Local: "ring"}, + InnerXML: []byte("1 shilling"), + } + p2 := Property{ + XMLName: xml.Name{Space: "x:", Local: "spoon"}, + InnerXML: []byte("runcible"), + } + p3 := Property{ + XMLName: xml.Name{Space: "x:", Local: "moon"}, + InnerXML: []byte("light"), + } + + if err := create("/src"); err != nil { + t.Fatalf("create /src: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil { + t.Fatalf("patch /src +p0 +p1: %v", err) + } + if _, err := copyFiles(fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil { + t.Fatalf("copyFiles /src /tmp: 
%v", err) + } + if _, err := moveFiles(fs, "/tmp", "/dst", true); err != nil { + t.Fatalf("moveFiles /tmp /dst: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil { + t.Fatalf("patch /src -p0: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil { + t.Fatalf("patch /src +p2: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil { + t.Fatalf("patch /dst -p1: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil { + t.Fatalf("patch /dst +p3: %v", err) + } + + gotSrc, err := props("/src") + if err != nil { + t.Fatalf("props /src: %v", err) + } + wantSrc := map[xml.Name]Property{ + p1.XMLName: p1, + p2.XMLName: p2, + } + if !reflect.DeepEqual(gotSrc, wantSrc) { + t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc) + } + + gotDst, err := props("/dst") + if err != nil { + t.Fatalf("props /dst: %v", err) + } + wantDst := map[xml.Name]Property{ + p0.XMLName: p0, + p3.XMLName: p3, + } + if !reflect.DeepEqual(gotDst, wantDst) { + t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst) + } +} + +func TestWalkFS(t *testing.T) { + testCases := []struct { + desc string + buildfs []string + startAt string + depth int + walkFn filepath.WalkFunc + want []string + }{{ + "just root", + []string{}, + "/", + infiniteDepth, + nil, + []string{ + "/", + }, + }, { + "infinite walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + infiniteDepth, + nil, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/d", + "/e", + "/f", + }, + }, { + "infinite walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/a", + infiniteDepth, + nil, + []string{ + "/a", + "/a/b", + "/a/b/c", + "/a/d", + }, + }, { + "depth 1 walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + 1, + nil, + []string{ + "/", + "/a", + "/e", + "/f", + }, + }, { + "depth 1 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 1, + nil, + []string{ + "/a/b", + "/a/b/c", + "/a/b/g", + }, + }, { + "depth 0 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk from file", + []string{ + "mkdir /a", + "touch /a/b", + "touch /a/c", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk with skipped subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + "touch /a/b/z", + }, + "/", + infiniteDepth, + func(path string, info os.FileInfo, err error) error { + if path == "/a/b/g" { + return filepath.SkipDir + } + return nil + }, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/b/z", + }, + }} + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + var got []string + traceFn := func(path string, info os.FileInfo, err error) error { + if tc.walkFn != nil { + err = tc.walkFn(path, info, err) + if err != nil { + return err + } + } + got = append(got, 
path) + return nil + } + fi, err := fs.Stat(tc.startAt) + if err != nil { + t.Fatalf("%s: cannot stat: %v", tc.desc, err) + } + err = walkFS(fs, tc.depth, tc.startAt, fi, traceFn) + if err != nil { + t.Errorf("%s:\ngot error %v, want nil", tc.desc, err) + continue + } + sort.Strings(got) + sort.Strings(tc.want) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want) + continue + } + } +} + +func buildTestFS(buildfs []string) (FileSystem, error) { + // TODO: Could this be merged with the build logic in TestFS? + + fs := NewMemFS() + for _, b := range buildfs { + op := strings.Split(b, " ") + switch op[0] { + case "mkdir": + err := fs.Mkdir(op[1], os.ModeDir|0777) + if err != nil { + return nil, err + } + case "touch": + f, err := fs.OpenFile(op[1], os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + f.Close() + case "write": + f, err := fs.OpenFile(op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return nil, err + } + _, err = f.Write([]byte(op[2])) + f.Close() + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown file operation %q", op[0]) + } + } + return fs, nil +} diff --git a/vendor/golang.org/x/net/webdav/if.go b/vendor/golang.org/x/net/webdav/if.go new file mode 100644 index 00000000000..416e81cdfdd --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +// The If header is covered by Section 10.4. +// http://www.webdav.org/specs/rfc4918.html#HEADER_If + +import ( + "strings" +) + +// ifHeader is a disjunction (OR) of ifLists. +type ifHeader struct { + lists []ifList +} + +// ifList is a conjunction (AND) of Conditions, and an optional resource tag. +type ifList struct { + resourceTag string + conditions []Condition +} + +// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string +// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is +// returned by req.Header.Get("If") for a http.Request req. 
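+//
+// As an indicative sketch, an input such as
+//
+//	(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2> ["an ETag"])
+//
+// yields a single ifList with two conditions, one carrying the lock token and
+// one carrying the quoted ETag; if_test.go below holds the authoritative cases.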
+func parseIfHeader(httpHeader string) (h ifHeader, ok bool) { + s := strings.TrimSpace(httpHeader) + switch tokenType, _, _ := lex(s); tokenType { + case '(': + return parseNoTagLists(s) + case angleTokenType: + return parseTaggedLists(s) + default: + return ifHeader{}, false + } +} + +func parseNoTagLists(s string) (h ifHeader, ok bool) { + for { + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + } +} + +func parseTaggedLists(s string) (h ifHeader, ok bool) { + resourceTag, n := "", 0 + for first := true; ; first = false { + tokenType, tokenStr, remaining := lex(s) + switch tokenType { + case angleTokenType: + if !first && n == 0 { + return ifHeader{}, false + } + resourceTag, n = tokenStr, 0 + s = remaining + case '(': + n++ + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + l.resourceTag = resourceTag + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + default: + return ifHeader{}, false + } + } +} + +func parseList(s string) (l ifList, remaining string, ok bool) { + tokenType, _, s := lex(s) + if tokenType != '(' { + return ifList{}, "", false + } + for { + tokenType, _, remaining = lex(s) + if tokenType == ')' { + if len(l.conditions) == 0 { + return ifList{}, "", false + } + return l, remaining, true + } + c, remaining, ok := parseCondition(s) + if !ok { + return ifList{}, "", false + } + l.conditions = append(l.conditions, c) + s = remaining + } +} + +func parseCondition(s string) (c Condition, remaining string, ok bool) { + tokenType, tokenStr, s := lex(s) + if tokenType == notTokenType { + c.Not = true + tokenType, tokenStr, s = lex(s) + } + switch tokenType { + case strTokenType, angleTokenType: + c.Token = tokenStr + case squareTokenType: + c.ETag = tokenStr + default: + return Condition{}, "", false + } + return c, s, true +} + +// Single-rune tokens like '(' or ')' have a token type equal to their rune. +// All other tokens have a negative token type. +const ( + errTokenType = rune(-1) + eofTokenType = rune(-2) + strTokenType = rune(-3) + notTokenType = rune(-4) + angleTokenType = rune(-5) + squareTokenType = rune(-6) +) + +func lex(s string) (tokenType rune, tokenStr string, remaining string) { + // The net/textproto Reader that parses the HTTP header will collapse + // Linear White Space that spans multiple "\r\n" lines to a single " ", + // so we don't need to look for '\r' or '\n'. + for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') { + s = s[1:] + } + if len(s) == 0 { + return eofTokenType, "", "" + } + i := 0 +loop: + for ; i < len(s); i++ { + switch s[i] { + case '\t', ' ', '(', ')', '<', '>', '[', ']': + break loop + } + } + + if i != 0 { + tokenStr, remaining = s[:i], s[i:] + if tokenStr == "Not" { + return notTokenType, "", remaining + } + return strTokenType, tokenStr, remaining + } + + j := 0 + switch s[0] { + case '<': + j, tokenType = strings.IndexByte(s, '>'), angleTokenType + case '[': + j, tokenType = strings.IndexByte(s, ']'), squareTokenType + default: + return rune(s[0]), "", s[1:] + } + if j < 0 { + return errTokenType, "", "" + } + return tokenType, s[1:j], s[j+1:] +} diff --git a/vendor/golang.org/x/net/webdav/if_test.go b/vendor/golang.org/x/net/webdav/if_test.go new file mode 100644 index 00000000000..aad61a40100 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if_test.go @@ -0,0 +1,322 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "reflect" + "strings" + "testing" +) + +func TestParseIfHeader(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + want ifHeader + }{{ + "bad: empty", + ``, + ifHeader{}, + }, { + "bad: no parens", + `foobar`, + ifHeader{}, + }, { + "bad: empty list #1", + `()`, + ifHeader{}, + }, { + "bad: empty list #2", + `(a) (b c) () (d)`, + ifHeader{}, + }, { + "bad: no list after resource #1", + ``, + ifHeader{}, + }, { + "bad: no list after resource #2", + ` (a)`, + ifHeader{}, + }, { + "bad: no list after resource #3", + ` (a) (b) `, + ifHeader{}, + }, { + "bad: no-tag-list followed by tagged-list", + `(a) (b) (c)`, + ifHeader{}, + }, { + "bad: unfinished list", + `(a`, + ifHeader{}, + }, { + "bad: unfinished ETag", + `([b`, + ifHeader{}, + }, { + "bad: unfinished Notted list", + `(Not a`, + ifHeader{}, + }, { + "bad: double Not", + `(Not Not a)`, + ifHeader{}, + }, { + "good: one list with a Token", + `(a)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }}, + }, + }, { + "good: one list with an ETag", + `([a])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + ETag: `a`, + }}, + }}, + }, + }, { + "good: one list with three Nots", + `(Not a Not b Not [d])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }, { + Not: true, + Token: `b`, + }, { + Not: true, + ETag: `d`, + }}, + }}, + }, + }, { + "good: two lists", + `(a) (b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Token: `b`, + }}, + }}, + }, + }, { + "good: two Notted lists", + `(Not a) (Not b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `b`, + }}, + }}, + }, + }, { + "section 7.5.1", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/users/f/fielding/index.html`, + conditions: []Condition{{ + Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`, + }}, + }}, + }, + }, { + "section 7.5.2 #1", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #2", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #3", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/member`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 9.9.6", + `() + ()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`, + }}, + }, { + conditions: []Condition{{ + Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`, + }}, + }}, + }, + }, { + "section 9.10.8", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`, + }}, + }}, + }, + }, { + "section 10.4.6", + `( + ["I am an ETag"]) + (["I am another ETag"])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: 
`urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `"I am an ETag"`, + }}, + }, { + conditions: []Condition{{ + ETag: `"I am another ETag"`, + }}, + }}, + }, + }, { + "section 10.4.7", + `(Not + )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`, + }}, + }}, + }, + }, { + "section 10.4.8", + `() + (Not )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `DAV:no-lock`, + }}, + }}, + }, + }, { + "section 10.4.9", + ` + ( + [W/"A weak ETag"]) (["strong ETag"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/resource1`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `W/"A weak ETag"`, + }}, + }, { + resourceTag: `/resource1`, + conditions: []Condition{{ + ETag: `"strong ETag"`, + }}, + }}, + }, + }, { + "section 10.4.10", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/specs/`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }}, + }, + }, { + "section 10.4.11 #1", + ` (["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + ETag: `"4217"`, + }}, + }}, + }, + }, { + "section 10.4.11 #2", + ` (Not ["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + Not: true, + ETag: `"4217"`, + }}, + }}, + }, + }} + + for _, tc := range testCases { + got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1)) + if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok { + t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok) + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want) + continue + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/README b/vendor/golang.org/x/net/webdav/internal/xml/README new file mode 100644 index 00000000000..89656f48962 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/README @@ -0,0 +1,11 @@ +This is a fork of the encoding/xml package at ca1d6c4, the last commit before +https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name +space behavior" made late in the lead-up to the Go 1.5 release. + +The list of encoding/xml changes is at +https://go.googlesource.com/go/+log/master/src/encoding/xml + +This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is +released. + +See http://golang.org/issue/11841 diff --git a/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go new file mode 100644 index 00000000000..a71284312af --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xml + +import "time" + +var atomValue = &Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Example Feed", + Link: []Link{{Href: "http://example.org/"}}, + Updated: ParseTime("2003-12-13T18:30:02Z"), + Author: Person{Name: "John Doe"}, + Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6", + + Entry: []Entry{ + { + Title: "Atom-Powered Robots Run Amok", + Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}}, + Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a", + Updated: ParseTime("2003-12-13T18:30:02Z"), + Summary: NewText("Some text."), + }, + }, +} + +var atomXml = `` + + `` + + `Example Feed` + + `urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6` + + `` + + `John Doe` + + `` + + `Atom-Powered Robots Run Amok` + + `urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a` + + `` + + `2003-12-13T18:30:02Z` + + `` + + `Some text.` + + `` + + `` + +func ParseTime(str string) time.Time { + t, err := time.Parse(time.RFC3339, str) + if err != nil { + panic(err) + } + return t +} + +func NewText(text string) Text { + return Text{ + Body: text, + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/example_test.go b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go new file mode 100644 index 00000000000..becedd58398 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go @@ -0,0 +1,151 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml_test + +import ( + "encoding/xml" + "fmt" + "os" +) + +func ExampleMarshalIndent() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + output, err := xml.MarshalIndent(v, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + os.Stdout.Write(output) + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +func ExampleEncoder() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + enc := xml.NewEncoder(os.Stdout) + enc.Indent(" ", " ") + if err := enc.Encode(v); err != nil { + fmt.Printf("error: %v\n", err) + } + + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +// This example demonstrates unmarshaling an XML excerpt into a value with +// some preset fields. Note that the Phone field isn't modified and that +// the XML element is ignored. Also, the Groups field is assigned +// considering the element path provided in its tag. 
+func ExampleUnmarshal() { + type Email struct { + Where string `xml:"where,attr"` + Addr string + } + type Address struct { + City, State string + } + type Result struct { + XMLName xml.Name `xml:"Person"` + Name string `xml:"FullName"` + Phone string + Email []Email + Groups []string `xml:"Group>Value"` + Address + } + v := Result{Name: "none", Phone: "none"} + + data := ` + + Grace R. Emlin + Example Inc. + + gre@example.com + + + gre@work.com + + + Friends + Squash + + Hanga Roa + Easter Island + + ` + err := xml.Unmarshal([]byte(data), &v) + if err != nil { + fmt.Printf("error: %v", err) + return + } + fmt.Printf("XMLName: %#v\n", v.XMLName) + fmt.Printf("Name: %q\n", v.Name) + fmt.Printf("Phone: %q\n", v.Phone) + fmt.Printf("Email: %v\n", v.Email) + fmt.Printf("Groups: %v\n", v.Groups) + fmt.Printf("Address: %v\n", v.Address) + // Output: + // XMLName: xml.Name{Space:"", Local:"Person"} + // Name: "Grace R. Emlin" + // Phone: "none" + // Email: [{home gre@example.com} {work gre@work.com}] + // Groups: [Friends Squash] + // Address: {Hanga Roa Easter Island} +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go new file mode 100644 index 00000000000..3c3b6aca586 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go @@ -0,0 +1,1223 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "reflect" + "strconv" + "strings" +) + +const ( + // A generic XML header suitable for use with the output of Marshal. + // This is not automatically added to any output of this package, + // it is provided as a convenience. + Header = `` + "\n" +) + +// Marshal returns the XML encoding of v. +// +// Marshal handles an array or slice by marshalling each of the elements. +// Marshal handles a pointer by marshalling the value it points at or, if the +// pointer is nil, by writing nothing. Marshal handles an interface value by +// marshalling the value it contains or, if the interface value is nil, by +// writing nothing. Marshal handles all other data by writing one or more XML +// elements containing the data. +// +// The name for the XML elements is taken from, in order of preference: +// - the tag on the XMLName field, if the data is a struct +// - the value of the XMLName field of type xml.Name +// - the tag of the struct field used to obtain the data +// - the name of the struct field used to obtain the data +// - the name of the marshalled type +// +// The XML element for a struct contains marshalled elements for each of the +// exported fields of the struct, with these exceptions: +// - the XMLName field, described above, is omitted. +// - a field with tag "-" is omitted. +// - a field with tag "name,attr" becomes an attribute with +// the given name in the XML element. +// - a field with tag ",attr" becomes an attribute with the +// field name in the XML element. +// - a field with tag ",chardata" is written as character data, +// not as an XML element. +// - a field with tag ",innerxml" is written verbatim, not subject +// to the usual marshalling procedure. +// - a field with tag ",comment" is written as an XML comment, not +// subject to the usual marshalling procedure. It must not contain +// the "--" string within it. 
+// - a field with a tag including the "omitempty" option is omitted +// if the field value is empty. The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or +// string of length zero. +// - an anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// If a field uses a tag "a>b>c", then the element c will be nested inside +// parent elements a and b. Fields that appear next to each other that name +// the same parent will be enclosed in one XML element. +// +// See MarshalIndent for an example. +// +// Marshal will return an error if asked to marshal a channel, function, or map. +func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. +// The sequence of encoded tokens must make up zero or more valid +// XML elements. +type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{printer{Writer: bufio.NewWriter(w)}} + e.p.encoder = e + return e +} + +// Indent sets the encoder to generate XML in which each element +// begins on a new indented line that starts with prefix and is followed by +// one or more copies of indent according to the nesting depth. +func (enc *Encoder) Indent(prefix, indent string) { + enc.p.prefix = prefix + enc.p.indent = indent +} + +// Encode writes the XML encoding of v to the stream. 
+// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// Encode calls Flush before returning. +func (enc *Encoder) Encode(v interface{}) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil) + if err != nil { + return err + } + return enc.p.Flush() +} + +// EncodeElement writes the XML encoding of v to the stream, +// using start as the outermost tag in the encoding. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// EncodeElement calls Flush before returning. +func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start) + if err != nil { + return err + } + return enc.p.Flush() +} + +var ( + begComment = []byte("") + endProcInst = []byte("?>") + endDirective = []byte(">") +) + +// EncodeToken writes the given XML token to the stream. +// It returns an error if StartElement and EndElement tokens are not +// properly matched. +// +// EncodeToken does not call Flush, because usually it is part of a +// larger operation such as Encode or EncodeElement (or a custom +// Marshaler's MarshalXML invoked during those), and those will call +// Flush when finished. Callers that create an Encoder and then invoke +// EncodeToken directly, without using Encode or EncodeElement, need to +// call Flush when finished to ensure that the XML is written to the +// underlying writer. +// +// EncodeToken allows writing a ProcInst with Target set to "xml" only +// as the first token in the stream. +// +// When encoding a StartElement holding an XML namespace prefix +// declaration for a prefix that is not already declared, contained +// elements (including the StartElement itself) will use the declared +// prefix when encoding names with matching namespace URIs. +func (enc *Encoder) EncodeToken(t Token) error { + + p := &enc.p + switch t := t.(type) { + case StartElement: + if err := p.writeStart(&t); err != nil { + return err + } + case EndElement: + if err := p.writeEnd(t.Name); err != nil { + return err + } + case CharData: + escapeText(p, t, false) + case Comment: + if bytes.Contains(t, endComment) { + return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") + } + p.WriteString("") + return p.cachedWriteError() + case ProcInst: + // First token to be encoded which is also a ProcInst with target of xml + // is the xml declaration. The only ProcInst where target of xml is allowed. + if t.Target == "xml" && p.Buffered() != 0 { + return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") + } + if !isNameString(t.Target) { + return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target") + } + if bytes.Contains(t.Inst, endProcInst) { + return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker") + } + p.WriteString(" 0 { + p.WriteByte(' ') + p.Write(t.Inst) + } + p.WriteString("?>") + case Directive: + if !isValidDirective(t) { + return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers") + } + p.WriteString("") + default: + return fmt.Errorf("xml: EncodeToken of invalid token type") + + } + return p.cachedWriteError() +} + +// isValidDirective reports whether dir is a valid directive text, +// meaning angle brackets are matched, ignoring comments and strings. 
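+//
+// For example, a directive body like `DOCTYPE note [ <!ENTITY x "y"> ]` keeps
+// its angle brackets balanced and is accepted, while one containing a stray
+// unmatched '>' is rejected.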
+func isValidDirective(dir Directive) bool { + var ( + depth int + inquote uint8 + incomment bool + ) + for i, c := range dir { + switch { + case incomment: + if c == '>' { + if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) { + incomment = false + } + } + // Just ignore anything in comment + case inquote != 0: + if c == inquote { + inquote = 0 + } + // Just ignore anything within quotes + case c == '\'' || c == '"': + inquote = c + case c == '<': + if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) { + incomment = true + } else { + depth++ + } + case c == '>': + if depth == 0 { + return false + } + depth-- + } + } + return depth == 0 && inquote == 0 && !incomment +} + +// Flush flushes any buffered XML to the underlying writer. +// See the EncodeToken documentation for details about when it is necessary. +func (enc *Encoder) Flush() error { + return enc.p.Flush() +} + +type printer struct { + *bufio.Writer + encoder *Encoder + seq int + indent string + prefix string + depth int + indentedIn bool + putNewline bool + defaultNS string + attrNS map[string]string // map prefix -> name space + attrPrefix map[string]string // map name space -> prefix + prefixes []printerPrefix + tags []Name +} + +// printerPrefix holds a namespace undo record. +// When an element is popped, the prefix record +// is set back to the recorded URL. The empty +// prefix records the URL for the default name space. +// +// The start of an element is recorded with an element +// that has mark=true. +type printerPrefix struct { + prefix string + url string + mark bool +} + +func (p *printer) prefixForNS(url string, isAttr bool) string { + // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml" + // and must be referred to that way. + // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", + // but users should not be trying to use that one directly - that's our job.) + if url == xmlURL { + return "xml" + } + if !isAttr && url == p.defaultNS { + // We can use the default name space. + return "" + } + return p.attrPrefix[url] +} + +// defineNS pushes any namespace definition found in the given attribute. +// If ignoreNonEmptyDefault is true, an xmlns="nonempty" +// attribute will be ignored. +func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error { + var prefix string + if attr.Name.Local == "xmlns" { + if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL { + return fmt.Errorf("xml: cannot redefine xmlns attribute prefix") + } + } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" { + prefix = attr.Name.Local + if attr.Value == "" { + // Technically, an empty XML namespace is allowed for an attribute. + // From http://www.w3.org/TR/xml-names11/#scoping-defaulting: + // + // The attribute value in a namespace declaration for a prefix may be + // empty. This has the effect, within the scope of the declaration, of removing + // any association of the prefix with a namespace name. + // + // However our namespace prefixes here are used only as hints. There's + // no need to respect the removal of a namespace prefix, so we ignore it. + return nil + } + } else { + // Ignore: it's not a namespace definition + return nil + } + if prefix == "" { + if attr.Value == p.defaultNS { + // No need for redefinition. + return nil + } + if attr.Value != "" && ignoreNonEmptyDefault { + // We have an xmlns="..." 
value but + // it can't define a name space in this context, + // probably because the element has an empty + // name space. In this case, we just ignore + // the name space declaration. + return nil + } + } else if _, ok := p.attrPrefix[attr.Value]; ok { + // There's already a prefix for the given name space, + // so use that. This prevents us from + // having two prefixes for the same name space + // so attrNS and attrPrefix can remain bijective. + return nil + } + p.pushPrefix(prefix, attr.Value) + return nil +} + +// createNSPrefix creates a name space prefix attribute +// to use for the given name space, defining a new prefix +// if necessary. +// If isAttr is true, the prefix is to be created for an attribute +// prefix, which means that the default name space cannot +// be used. +func (p *printer) createNSPrefix(url string, isAttr bool) { + if _, ok := p.attrPrefix[url]; ok { + // We already have a prefix for the given URL. + return + } + switch { + case !isAttr && url == p.defaultNS: + // We can use the default name space. + return + case url == "": + // The only way we can encode names in the empty + // name space is by using the default name space, + // so we must use that. + if p.defaultNS != "" { + // The default namespace is non-empty, so we + // need to set it to empty. + p.pushPrefix("", "") + } + return + case url == xmlURL: + return + } + // TODO If the URL is an existing prefix, we could + // use it as is. That would enable the + // marshaling of elements that had been unmarshaled + // and with a name space prefix that was not found. + // although technically it would be incorrect. + + // Pick a name. We try to use the final element of the path + // but fall back to _. + prefix := strings.TrimRight(url, "/") + if i := strings.LastIndex(prefix, "/"); i >= 0 { + prefix = prefix[i+1:] + } + if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { + prefix = "_" + } + if strings.HasPrefix(prefix, "xml") { + // xmlanything is reserved. + prefix = "_" + prefix + } + if p.attrNS[prefix] != "" { + // Name is taken. Find a better one. + for p.seq++; ; p.seq++ { + if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { + prefix = id + break + } + } + } + + p.pushPrefix(prefix, url) +} + +// writeNamespaces writes xmlns attributes for all the +// namespace prefixes that have been defined in +// the current element. +func (p *printer) writeNamespaces() { + for i := len(p.prefixes) - 1; i >= 0; i-- { + prefix := p.prefixes[i] + if prefix.mark { + return + } + p.WriteString(" ") + if prefix.prefix == "" { + // Default name space. + p.WriteString(`xmlns="`) + } else { + p.WriteString("xmlns:") + p.WriteString(prefix.prefix) + p.WriteString(`="`) + } + EscapeText(p, []byte(p.nsForPrefix(prefix.prefix))) + p.WriteString(`"`) + } +} + +// pushPrefix pushes a new prefix on the prefix stack +// without checking to see if it is already defined. +func (p *printer) pushPrefix(prefix, url string) { + p.prefixes = append(p.prefixes, printerPrefix{ + prefix: prefix, + url: p.nsForPrefix(prefix), + }) + p.setAttrPrefix(prefix, url) +} + +// nsForPrefix returns the name space for the given +// prefix. Note that this is not valid for the +// empty attribute prefix, which always has an empty +// name space. +func (p *printer) nsForPrefix(prefix string) string { + if prefix == "" { + return p.defaultNS + } + return p.attrNS[prefix] +} + +// markPrefix marks the start of an element on the prefix +// stack. 
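+// Together with popPrefix, this brackets the namespace prefixes defined while
+// writing a single element, so they fall out of scope when the element is
+// closed.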
+func (p *printer) markPrefix() { + p.prefixes = append(p.prefixes, printerPrefix{ + mark: true, + }) +} + +// popPrefix pops all defined prefixes for the current +// element. +func (p *printer) popPrefix() { + for len(p.prefixes) > 0 { + prefix := p.prefixes[len(p.prefixes)-1] + p.prefixes = p.prefixes[:len(p.prefixes)-1] + if prefix.mark { + break + } + p.setAttrPrefix(prefix.prefix, prefix.url) + } +} + +// setAttrPrefix sets an attribute name space prefix. +// If url is empty, the attribute is removed. +// If prefix is empty, the default name space is set. +func (p *printer) setAttrPrefix(prefix, url string) { + if prefix == "" { + p.defaultNS = url + return + } + if url == "" { + delete(p.attrPrefix, p.attrNS[prefix]) + delete(p.attrNS, prefix) + return + } + if p.attrPrefix == nil { + // Need to define a new name space. + p.attrPrefix = make(map[string]string) + p.attrNS = make(map[string]string) + } + // Remove any old prefix value. This is OK because we maintain a + // strict one-to-one mapping between prefix and URL (see + // defineNS) + delete(p.attrPrefix, p.attrNS[prefix]) + p.attrPrefix[url] = prefix + p.attrNS[prefix] = url +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +) + +// marshalValue writes one or more XML elements representing val. +// If val was obtained from a struct field, finfo must have its details. +func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { + if startTemplate != nil && startTemplate.Name.Local == "" { + return fmt.Errorf("xml: EncodeElement of StartElement with missing name") + } + + if !val.IsValid() { + return nil + } + if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { + return nil + } + + // Drill into interfaces and pointers. + // This can turn into an infinite loop given a cyclic chain, + // but it matches the Go 1 behavior. + for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + kind := val.Kind() + typ := val.Type() + + // Check for marshaler. + if val.CanInterface() && typ.Implements(marshalerType) { + return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerType) { + return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Check for text marshaler. + if val.CanInterface() && typ.Implements(textMarshalerType) { + return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Slices and arrays iterate over the elements. They do not have an enclosing tag. + if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { + for i, n := 0, val.Len(); i < n; i++ { + if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { + return err + } + } + return nil + } + + tinfo, err := getTypeInfo(typ) + if err != nil { + return err + } + + // Create start element. 
+ // Precedence for the XML element name is: + // 0. startTemplate + // 1. XMLName field in underlying struct; + // 2. field name/tag in the struct field; and + // 3. type name + var start StartElement + + // explicitNS records whether the element's name space has been + // explicitly set (for example an XMLName field). + explicitNS := false + + if startTemplate != nil { + start.Name = startTemplate.Name + explicitNS = true + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if tinfo.xmlname != nil { + xmlname := tinfo.xmlname + if xmlname.name != "" { + start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name + } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { + start.Name = v + } + explicitNS = true + } + if start.Name.Local == "" && finfo != nil { + start.Name.Local = finfo.name + if finfo.xmlns != "" { + start.Name.Space = finfo.xmlns + explicitNS = true + } + } + if start.Name.Local == "" { + name := typ.Name() + if name == "" { + return &UnsupportedTypeError{typ} + } + start.Name.Local = name + } + + // defaultNS records the default name space as set by a xmlns="..." + // attribute. We don't set p.defaultNS because we want to let + // the attribute writing code (in p.defineNS) be solely responsible + // for maintaining that. + defaultNS := p.defaultNS + + // Attributes + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr == 0 { + continue + } + attr, err := p.fieldAttr(finfo, val) + if err != nil { + return err + } + if attr.Name.Local == "" { + continue + } + start.Attr = append(start.Attr, attr) + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + defaultNS = attr.Value + } + } + if !explicitNS { + // Historic behavior: elements use the default name space + // they are contained in by default. + start.Name.Space = defaultNS + } + // Historic behaviour: an element that's in a namespace sets + // the default namespace for all elements contained within it. + start.setDefaultNamespace() + + if err := p.writeStart(&start); err != nil { + return err + } + + if val.Kind() == reflect.Struct { + err = p.marshalStruct(tinfo, val) + } else { + s, b, err1 := p.marshalSimple(typ, val) + if err1 != nil { + err = err1 + } else if b != nil { + EscapeText(p, b) + } else { + p.EscapeString(s) + } + } + if err != nil { + return err + } + + if err := p.writeEnd(start.Name); err != nil { + return err + } + + return p.cachedWriteError() +} + +// fieldAttr returns the attribute of the given field. +// If the returned attribute has an empty Name.Local, +// it should not be used. +// The given value holds the value containing the field. 
+func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) { + fv := finfo.value(val) + name := Name{Space: finfo.xmlns, Local: finfo.name} + if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { + return Attr{}, nil + } + if fv.Kind() == reflect.Interface && fv.IsNil() { + return Attr{}, nil + } + if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { + attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + } + if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { + text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + } + // Dereference or skip nil pointer, interface values. + switch fv.Kind() { + case reflect.Ptr, reflect.Interface: + if fv.IsNil() { + return Attr{}, nil + } + fv = fv.Elem() + } + s, b, err := p.marshalSimple(fv.Type(), fv) + if err != nil { + return Attr{}, err + } + if b != nil { + s = string(b) + } + return Attr{name, s}, nil +} + +// defaultStart returns the default start element to use, +// given the reflect type, field info, and start template. +func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { + var start StartElement + // Precedence for the XML element name is as above, + // except that we do not look inside structs for the first field. + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if finfo != nil && finfo.name != "" { + start.Name.Local = finfo.name + start.Name.Space = finfo.xmlns + } else if typ.Name() != "" { + start.Name.Local = typ.Name() + } else { + // Must be a pointer to a named type, + // since it has the Marshaler methods. + start.Name.Local = typ.Elem().Name() + } + // Historic behaviour: elements use the name space of + // the element they are contained in by default. + if start.Name.Space == "" { + start.Name.Space = p.defaultNS + } + start.setDefaultNamespace() + return start +} + +// marshalInterface marshals a Marshaler interface value. +func (p *printer) marshalInterface(val Marshaler, start StartElement) error { + // Push a marker onto the tag stack so that MarshalXML + // cannot close the XML tags that it did not open. + p.tags = append(p.tags, Name{}) + n := len(p.tags) + + err := val.MarshalXML(p.encoder, start) + if err != nil { + return err + } + + // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. + if len(p.tags) > n { + return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) + } + p.tags = p.tags[:n-1] + return nil +} + +// marshalTextInterface marshals a TextMarshaler interface value. 
+func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { + if err := p.writeStart(&start); err != nil { + return err + } + text, err := val.MarshalText() + if err != nil { + return err + } + EscapeText(p, text) + return p.writeEnd(start.Name) +} + +// writeStart writes the given start element. +func (p *printer) writeStart(start *StartElement) error { + if start.Name.Local == "" { + return fmt.Errorf("xml: start tag with no name") + } + + p.tags = append(p.tags, start.Name) + p.markPrefix() + // Define any name spaces explicitly declared in the attributes. + // We do this as a separate pass so that explicitly declared prefixes + // will take precedence over implicitly declared prefixes + // regardless of the order of the attributes. + ignoreNonEmptyDefault := start.Name.Space == "" + for _, attr := range start.Attr { + if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil { + return err + } + } + // Define any new name spaces implied by the attributes. + for _, attr := range start.Attr { + name := attr.Name + // From http://www.w3.org/TR/xml-names11/#defaulting + // "Default namespace declarations do not apply directly + // to attribute names; the interpretation of unprefixed + // attributes is determined by the element on which they + // appear." + // This means we don't need to create a new namespace + // when an attribute name space is empty. + if name.Space != "" && !name.isNamespace() { + p.createNSPrefix(name.Space, true) + } + } + p.createNSPrefix(start.Name.Space, false) + + p.writeIndent(1) + p.WriteByte('<') + p.writeName(start.Name, false) + p.writeNamespaces() + for _, attr := range start.Attr { + name := attr.Name + if name.Local == "" || name.isNamespace() { + // Namespaces have already been written by writeNamespaces above. + continue + } + p.WriteByte(' ') + p.writeName(name, true) + p.WriteString(`="`) + p.EscapeString(attr.Value) + p.WriteByte('"') + } + p.WriteByte('>') + return nil +} + +// writeName writes the given name. It assumes +// that p.createNSPrefix(name) has already been called. 
+func (p *printer) writeName(name Name, isAttr bool) {
+ if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" {
+ p.WriteString(prefix)
+ p.WriteByte(':')
+ }
+ p.WriteString(name.Local)
+}
+
+func (p *printer) writeEnd(name Name) error {
+ if name.Local == "" {
+ return fmt.Errorf("xml: end tag with no name")
+ }
+ if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" {
+ return fmt.Errorf("xml: end tag </%s> without start tag", name.Local)
+ }
+ if top := p.tags[len(p.tags)-1]; top != name {
+ if top.Local != name.Local {
+ return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local)
+ }
+ return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space)
+ }
+ p.tags = p.tags[:len(p.tags)-1]
+
+ p.writeIndent(-1)
+ p.WriteByte('<')
+ p.WriteByte('/')
+ p.writeName(name, false)
+ p.WriteByte('>')
+ p.popPrefix()
+ return nil
+}
+
+func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(val.Int(), 10), nil, nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(val.Uint(), 10), nil, nil
+ case reflect.Float32, reflect.Float64:
+ return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil
+ case reflect.String:
+ return val.String(), nil, nil
+ case reflect.Bool:
+ return strconv.FormatBool(val.Bool()), nil, nil
+ case reflect.Array:
+ if typ.Elem().Kind() != reflect.Uint8 {
+ break
+ }
+ // [...]byte
+ var bytes []byte
+ if val.CanAddr() {
+ bytes = val.Slice(0, val.Len()).Bytes()
+ } else {
+ bytes = make([]byte, val.Len())
+ reflect.Copy(reflect.ValueOf(bytes), val)
+ }
+ return "", bytes, nil
+ case reflect.Slice:
+ if typ.Elem().Kind() != reflect.Uint8 {
+ break
+ }
+ // []byte
+ return "", val.Bytes(), nil
+ }
+ return "", nil, &UnsupportedTypeError{typ}
+}
+
+var ddBytes = []byte("--")
+
+func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
+ s := parentStack{p: p}
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ if finfo.flags&fAttr != 0 {
+ continue
+ }
+ vf := finfo.value(val)
+
+ // Dereference or skip nil pointer, interface values.
+ switch vf.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if !vf.IsNil() {
+ vf = vf.Elem()
+ }
+ }
+
+ switch finfo.flags & fMode {
+ case fCharData:
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ if vf.CanInterface() && vf.Type().Implements(textMarshalerType) {
+ data, err := vf.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return err
+ }
+ Escape(p, data)
+ continue
+ }
+ if vf.CanAddr() {
+ pv := vf.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+ data, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return err
+ }
+ Escape(p, data)
+ continue
+ }
+ }
+ var scratch [64]byte
+ switch vf.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10))
+ case reflect.Float32, reflect.Float64:
+ Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits()))
+ case reflect.Bool:
+ Escape(p, strconv.AppendBool(scratch[:0], vf.Bool()))
+ case reflect.String:
+ if err := EscapeText(p, []byte(vf.String())); err != nil {
+ return err
+ }
+ case reflect.Slice:
+ if elem, ok := vf.Interface().([]byte); ok {
+ if err := EscapeText(p, elem); err != nil {
+ return err
+ }
+ }
+ }
+ continue
+
+ case fComment:
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ k := vf.Kind()
+ if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {
+ return fmt.Errorf("xml: bad type for comment field of %s", val.Type())
+ }
+ if vf.Len() == 0 {
+ continue
+ }
+ p.writeIndent(0)
+ p.WriteString("<!--")
+ dot := false
+ switch k {
+ case reflect.String:
+ s := vf.String()
+ if strings.Contains(s, "--") {
+ return fmt.Errorf(`xml: comments must not contain "--"`)
+ }
+ dot = strings.HasSuffix(s, "-")
+ p.WriteString(s)
+ case reflect.Slice:
+ b := vf.Bytes()
+ if bytes.Contains(b, ddBytes) {
+ return fmt.Errorf(`xml: comments must not contain "--"`)
+ }
+ dot = len(b) > 0 && b[len(b)-1] == '-'
+ p.Write(b)
+ }
+ if dot {
+ // "--->" is invalid grammar. Make it "- -->"
+ p.WriteByte(' ')
+ }
+ p.WriteString("-->")
+ continue
+
+ case fInnerXml:
+ iface := vf.Interface()
+ switch raw := iface.(type) {
+ case []byte:
+ p.Write(raw)
+ continue
+ case string:
+ p.WriteString(raw)
+ continue
+ }
+
+ case fElement, fElement | fAny:
+ if err := s.setParents(finfo, vf); err != nil {
+ return err
+ }
+ }
+ if err := p.marshalValue(vf, finfo, nil); err != nil {
+ return err
+ }
+ }
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ return p.cachedWriteError()
+}
+
+var noField fieldInfo
+
+// return the bufio Writer's cached write error
+func (p *printer) cachedWriteError() error {
+ _, err := p.Write(nil)
+ return err
+}
+
+func (p *printer) writeIndent(depthDelta int) {
+ if len(p.prefix) == 0 && len(p.indent) == 0 {
+ return
+ }
+ if depthDelta < 0 {
+ p.depth--
+ if p.indentedIn {
+ p.indentedIn = false
+ return
+ }
+ p.indentedIn = false
+ }
+ if p.putNewline {
+ p.WriteByte('\n')
+ } else {
+ p.putNewline = true
+ }
+ if len(p.prefix) > 0 {
+ p.WriteString(p.prefix)
+ }
+ if len(p.indent) > 0 {
+ for i := 0; i < p.depth; i++ {
+ p.WriteString(p.indent)
+ }
+ }
+ if depthDelta > 0 {
+ p.depth++
+ p.indentedIn = true
+ }
+}
+
+type parentStack struct {
+ p *printer
+ xmlns string
+ parents []string
+}
+
+// setParents sets the stack of current parents to those found in finfo.
+// It only writes the start elements if vf holds a non-nil value.
+// If finfo is &noField, it pops all elements.
+func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error { + xmlns := s.p.defaultNS + if finfo.xmlns != "" { + xmlns = finfo.xmlns + } + commonParents := 0 + if xmlns == s.xmlns { + for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ { + if finfo.parents[commonParents] != s.parents[commonParents] { + break + } + } + } + // Pop off any parents that aren't in common with the previous field. + for i := len(s.parents) - 1; i >= commonParents; i-- { + if err := s.p.writeEnd(Name{ + Space: s.xmlns, + Local: s.parents[i], + }); err != nil { + return err + } + } + s.parents = finfo.parents + s.xmlns = xmlns + if commonParents >= len(s.parents) { + // No new elements to push. + return nil + } + if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() { + // The element is nil, so no need for the start elements. + s.parents = s.parents[:commonParents] + return nil + } + // Push any new parents required. + for _, name := range s.parents[commonParents:] { + start := &StartElement{ + Name: Name{ + Space: s.xmlns, + Local: name, + }, + } + // Set the default name space for parent elements + // to match what we do with other elements. + if s.xmlns != s.p.defaultNS { + start.setDefaultNamespace() + } + if err := s.p.writeStart(start); err != nil { + return err + } + } + return nil +} + +// A MarshalXMLError is returned when Marshal encounters a type +// that cannot be converted into XML. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "xml: unsupported type: " + e.Type.String() +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go new file mode 100644 index 00000000000..5dc78e748b3 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go @@ -0,0 +1,1939 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +type DriveType int + +const ( + HyperDrive DriveType = iota + ImprobabilityDrive +) + +type Passenger struct { + Name []string `xml:"name"` + Weight float32 `xml:"weight"` +} + +type Ship struct { + XMLName struct{} `xml:"spaceship"` + + Name string `xml:"name,attr"` + Pilot string `xml:"pilot,attr"` + Drive DriveType `xml:"drive"` + Age uint `xml:"age"` + Passenger []*Passenger `xml:"passenger"` + secret string +} + +type NamedType string + +type Port struct { + XMLName struct{} `xml:"port"` + Type string `xml:"type,attr,omitempty"` + Comment string `xml:",comment"` + Number string `xml:",chardata"` +} + +type Domain struct { + XMLName struct{} `xml:"domain"` + Country string `xml:",attr,omitempty"` + Name []byte `xml:",chardata"` + Comment []byte `xml:",comment"` +} + +type Book struct { + XMLName struct{} `xml:"book"` + Title string `xml:",chardata"` +} + +type Event struct { + XMLName struct{} `xml:"event"` + Year int `xml:",chardata"` +} + +type Movie struct { + XMLName struct{} `xml:"movie"` + Length uint `xml:",chardata"` +} + +type Pi struct { + XMLName struct{} `xml:"pi"` + Approximation float32 `xml:",chardata"` +} + +type Universe struct { + XMLName struct{} `xml:"universe"` + Visible float64 `xml:",chardata"` +} + +type Particle struct { + XMLName struct{} `xml:"particle"` + HasMass bool `xml:",chardata"` +} + +type Departure struct { + XMLName struct{} `xml:"departure"` + When time.Time `xml:",chardata"` +} + +type SecretAgent struct { + XMLName struct{} `xml:"agent"` + Handle string `xml:"handle,attr"` + Identity string + Obfuscate string `xml:",innerxml"` +} + +type NestedItems struct { + XMLName struct{} `xml:"result"` + Items []string `xml:">item"` + Item1 []string `xml:"Items>item1"` +} + +type NestedOrder struct { + XMLName struct{} `xml:"result"` + Field1 string `xml:"parent>c"` + Field2 string `xml:"parent>b"` + Field3 string `xml:"parent>a"` +} + +type MixedNested struct { + XMLName struct{} `xml:"result"` + A string `xml:"parent1>a"` + B string `xml:"b"` + C string `xml:"parent1>parent2>c"` + D string `xml:"parent1>d"` +} + +type NilTest struct { + A interface{} `xml:"parent1>parent2>a"` + B interface{} `xml:"parent1>b"` + C interface{} `xml:"parent1>parent2>c"` +} + +type Service struct { + XMLName struct{} `xml:"service"` + Domain *Domain `xml:"host>domain"` + Port *Port `xml:"host>port"` + Extra1 interface{} + Extra2 interface{} `xml:"host>extra2"` +} + +var nilStruct *Ship + +type EmbedA struct { + EmbedC + EmbedB EmbedB + FieldA string +} + +type EmbedB struct { + FieldB string + *EmbedC +} + +type EmbedC struct { + FieldA1 string `xml:"FieldA>A1"` + FieldA2 string `xml:"FieldA>A2"` + FieldB string + FieldC string +} + +type NameCasing struct { + XMLName struct{} `xml:"casing"` + Xy string + XY string + XyA string `xml:"Xy,attr"` + XYA string `xml:"XY,attr"` +} + +type NamePrecedence struct { + XMLName Name `xml:"Parent"` + FromTag XMLNameWithoutTag `xml:"InTag"` + FromNameVal XMLNameWithoutTag + FromNameTag XMLNameWithTag + InFieldName string +} + +type XMLNameWithTag struct { + XMLName Name `xml:"InXMLNameTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithNSTag struct { + XMLName Name `xml:"ns InXMLNameWithNSTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithoutTag struct { + XMLName Name + Value string `xml:",chardata"` +} + +type NameInField struct { + Foo Name `xml:"ns foo"` +} + +type AttrTest struct { + 
Int int `xml:",attr"` + Named int `xml:"int,attr"` + Float float64 `xml:",attr"` + Uint8 uint8 `xml:",attr"` + Bool bool `xml:",attr"` + Str string `xml:",attr"` + Bytes []byte `xml:",attr"` +} + +type OmitAttrTest struct { + Int int `xml:",attr,omitempty"` + Named int `xml:"int,attr,omitempty"` + Float float64 `xml:",attr,omitempty"` + Uint8 uint8 `xml:",attr,omitempty"` + Bool bool `xml:",attr,omitempty"` + Str string `xml:",attr,omitempty"` + Bytes []byte `xml:",attr,omitempty"` +} + +type OmitFieldTest struct { + Int int `xml:",omitempty"` + Named int `xml:"int,omitempty"` + Float float64 `xml:",omitempty"` + Uint8 uint8 `xml:",omitempty"` + Bool bool `xml:",omitempty"` + Str string `xml:",omitempty"` + Bytes []byte `xml:",omitempty"` + Ptr *PresenceTest `xml:",omitempty"` +} + +type AnyTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField AnyHolder `xml:",any"` +} + +type AnyOmitTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField *AnyHolder `xml:",any,omitempty"` +} + +type AnySliceTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField []AnyHolder `xml:",any"` +} + +type AnyHolder struct { + XMLName Name + XML string `xml:",innerxml"` +} + +type RecurseA struct { + A string + B *RecurseB +} + +type RecurseB struct { + A *RecurseA + B string +} + +type PresenceTest struct { + Exists *struct{} +} + +type IgnoreTest struct { + PublicSecret string `xml:"-"` +} + +type MyBytes []byte + +type Data struct { + Bytes []byte + Attr []byte `xml:",attr"` + Custom MyBytes +} + +type Plain struct { + V interface{} +} + +type MyInt int + +type EmbedInt struct { + MyInt +} + +type Strings struct { + X []string `xml:"A>B,omitempty"` +} + +type PointerFieldsTest struct { + XMLName Name `xml:"dummy"` + Name *string `xml:"name,attr"` + Age *uint `xml:"age,attr"` + Empty *string `xml:"empty,attr"` + Contents *string `xml:",chardata"` +} + +type ChardataEmptyTest struct { + XMLName Name `xml:"test"` + Contents *string `xml:",chardata"` +} + +type MyMarshalerTest struct { +} + +var _ Marshaler = (*MyMarshalerTest)(nil) + +func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error { + e.EncodeToken(start) + e.EncodeToken(CharData([]byte("hello world"))) + e.EncodeToken(EndElement{start.Name}) + return nil +} + +type MyMarshalerAttrTest struct{} + +var _ MarshalerAttr = (*MyMarshalerAttrTest)(nil) + +func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MyMarshalerValueAttrTest struct{} + +var _ MarshalerAttr = MyMarshalerValueAttrTest{} + +func (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MarshalerStruct struct { + Foo MyMarshalerAttrTest `xml:",attr"` +} + +type MarshalerValueStruct struct { + Foo MyMarshalerValueAttrTest `xml:",attr"` +} + +type InnerStruct struct { + XMLName Name `xml:"testns outer"` +} + +type OuterStruct struct { + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterNamedStruct struct { + InnerStruct + XMLName Name `xml:"outerns test"` + IntAttr int `xml:"int,attr"` +} + +type OuterNamedOrderedStruct struct { + XMLName Name `xml:"outerns test"` + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterOuterStruct struct { + OuterStruct +} + +type NestedAndChardata struct { + AB []string `xml:"A>B"` + Chardata string `xml:",chardata"` +} + +type NestedAndComment struct { + AB []string `xml:"A>B"` + Comment 
string `xml:",comment"` +} + +type XMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body string +} + +type NamedXMLNSFieldStruct struct { + XMLName struct{} `xml:"testns test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type XMLNSFieldStructWithOmitEmpty struct { + Ns string `xml:"xmlns,attr,omitempty"` + Body string +} + +type NamedXMLNSFieldStructWithEmptyNamespace struct { + XMLName struct{} `xml:"test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type RecursiveXMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body *RecursiveXMLNSFieldStruct `xml:",omitempty"` + Text string `xml:",omitempty"` +} + +func ifaceptr(x interface{}) interface{} { + return &x +} + +var ( + nameAttr = "Sarah" + ageAttr = uint(12) + contentsAttr = "lorem ipsum" +) + +// Unless explicitly stated as such (or *Plain), all of the +// tests below are two-way tests. When introducing new tests, +// please try to make them two-way as well to ensure that +// marshalling and unmarshalling are as symmetrical as feasible. +var marshalTests = []struct { + Value interface{} + ExpectXML string + MarshalOnly bool + UnmarshalOnly bool +}{ + // Test nil marshals to nothing + {Value: nil, ExpectXML: ``, MarshalOnly: true}, + {Value: nilStruct, ExpectXML: ``, MarshalOnly: true}, + + // Test value types + {Value: &Plain{true}, ExpectXML: `true`}, + {Value: &Plain{false}, ExpectXML: `false`}, + {Value: &Plain{int(42)}, ExpectXML: `42`}, + {Value: &Plain{int8(42)}, ExpectXML: `42`}, + {Value: &Plain{int16(42)}, ExpectXML: `42`}, + {Value: &Plain{int32(42)}, ExpectXML: `42`}, + {Value: &Plain{uint(42)}, ExpectXML: `42`}, + {Value: &Plain{uint8(42)}, ExpectXML: `42`}, + {Value: &Plain{uint16(42)}, ExpectXML: `42`}, + {Value: &Plain{uint32(42)}, ExpectXML: `42`}, + {Value: &Plain{float32(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{float64(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `65501`}, + {Value: &Plain{"gopher"}, ExpectXML: `gopher`}, + {Value: &Plain{[]byte("gopher")}, ExpectXML: `gopher`}, + {Value: &Plain{""}, ExpectXML: `</>`}, + {Value: &Plain{[]byte("")}, ExpectXML: `</>`}, + {Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `</>`}, + {Value: &Plain{NamedType("potato")}, ExpectXML: `potato`}, + {Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `true`}, + + // Test time. + { + Value: &Plain{time.Unix(1e9, 123456789).UTC()}, + ExpectXML: `2001-09-09T01:46:40.123456789Z`, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A []byte field is only nil if the element was not found. + { + Value: &Data{}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + { + Value: &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Check that []byte works, including named []byte types. 
+ { + Value: &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}}, + ExpectXML: `abcd`, + }, + + // Test innerxml + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + ExpectXML: `James Bond`, + MarshalOnly: true, + }, + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "James Bond", + }, + ExpectXML: `James Bond`, + UnmarshalOnly: true, + }, + + // Test structs + {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Type: ""}, ExpectXML: ``}, + {Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `443`}, + {Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `443`, MarshalOnly: true}, + {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `google.com&friends`}, + {Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `google.com`}, + {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `Pride & Prejudice`}, + {Value: &Event{Year: -3114}, ExpectXML: `-3114`}, + {Value: &Movie{Length: 13440}, ExpectXML: `13440`}, + {Value: &Pi{Approximation: 3.14159265}, ExpectXML: `3.1415927`}, + {Value: &Universe{Visible: 9.3e13}, ExpectXML: `9.3e+13`}, + {Value: &Particle{HasMass: true}, ExpectXML: `true`}, + {Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `2013-01-09T00:15:00-09:00`}, + {Value: atomValue, ExpectXML: atomXml}, + { + Value: &Ship{ + Name: "Heart of Gold", + Pilot: "Computer", + Age: 1, + Drive: ImprobabilityDrive, + Passenger: []*Passenger{ + { + Name: []string{"Zaphod", "Beeblebrox"}, + Weight: 7.25, + }, + { + Name: []string{"Trisha", "McMillen"}, + Weight: 5.5, + }, + { + Name: []string{"Ford", "Prefect"}, + Weight: 7, + }, + { + Name: []string{"Arthur", "Dent"}, + Weight: 6.75, + }, + }, + }, + ExpectXML: `` + + `` + strconv.Itoa(int(ImprobabilityDrive)) + `` + + `1` + + `` + + `Zaphod` + + `Beeblebrox` + + `7.25` + + `` + + `` + + `Trisha` + + `McMillen` + + `5.5` + + `` + + `` + + `Ford` + + `Prefect` + + `7` + + `` + + `` + + `Arthur` + + `Dent` + + `6.75` + + `` + + ``, + }, + + // Test a>b + { + Value: &NestedItems{Items: nil, Item1: nil}, + ExpectXML: `` + + `` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{}, Item1: []string{}}, + ExpectXML: `` + + `` + + `` + + ``, + MarshalOnly: true, + }, + { + Value: &NestedItems{Items: nil, Item1: []string{"A"}}, + ExpectXML: `` + + `` + + `A` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil}, + ExpectXML: `` + + `` + + `A` + + `B` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}}, + ExpectXML: `` + + `` + + `A` + + `B` + + `C` + + `` + + ``, + }, + { + Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"}, + ExpectXML: `` + + `` + + `C` + + `B` + + `A` + + `` + + ``, + }, + { + Value: &NilTest{A: "A", B: nil, C: "C"}, + ExpectXML: `` + + `` + + `A` + + `C` + + `` + + ``, + MarshalOnly: true, // Uses interface{} + }, + { + Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"}, + ExpectXML: `` + + `A` + + `B` + + `` + + `C` + + `D` + + `` + + ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}}, + ExpectXML: `80`, + }, + { + Value: &Service{}, + ExpectXML: ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"}, + ExpectXML: `` + + `80` + + `A` + + `B` + + ``, + MarshalOnly: true, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"}, + 
ExpectXML: `` + + `80` + + `example` + + ``, + MarshalOnly: true, + }, + { + Value: &struct { + XMLName struct{} `xml:"space top"` + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + E1 string `xml:"x>e"` + }{ + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + E1: "e1", + }, + ExpectXML: `` + + `abc` + + `` + + `c1` + + `d1` + + `` + + `` + + `e1` + + `` + + ``, + }, + { + Value: &struct { + XMLName Name + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + }{ + XMLName: Name{ + Space: "space0", + Local: "top", + }, + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + }, + ExpectXML: `` + + `ab` + + `c` + + `` + + `c1` + + `d1` + + `` + + ``, + }, + { + Value: &struct { + XMLName struct{} `xml:"top"` + B string `xml:"space x>b"` + B1 string `xml:"space1 x>b"` + }{ + B: "b", + B1: "b1", + }, + ExpectXML: `` + + `b` + + `b1` + + ``, + }, + + // Test struct embedding + { + Value: &EmbedA{ + EmbedC: EmbedC{ + FieldA1: "", // Shadowed by A.A + FieldA2: "", // Shadowed by A.A + FieldB: "A.C.B", + FieldC: "A.C.C", + }, + EmbedB: EmbedB{ + FieldB: "A.B.B", + EmbedC: &EmbedC{ + FieldA1: "A.B.C.A1", + FieldA2: "A.B.C.A2", + FieldB: "", // Shadowed by A.B.B + FieldC: "A.B.C.C", + }, + }, + FieldA: "A.A", + }, + ExpectXML: `` + + `A.C.B` + + `A.C.C` + + `` + + `A.B.B` + + `` + + `A.B.C.A1` + + `A.B.C.A2` + + `` + + `A.B.C.C` + + `` + + `A.A` + + ``, + }, + + // Test that name casing matters + { + Value: &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"}, + ExpectXML: `mixedupper`, + }, + + // Test the order in which the XML element name is chosen + { + Value: &NamePrecedence{ + FromTag: XMLNameWithoutTag{Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"}, + FromNameTag: XMLNameWithTag{Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + MarshalOnly: true, + }, + { + Value: &NamePrecedence{ + XMLName: Name{Local: "Parent"}, + FromTag: XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "FromNameVal"}, Value: "B"}, + FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + UnmarshalOnly: true, + }, + + // xml.Name works in a plain field as well. + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + }, + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Marshaling zero xml.Name uses the tag or field name. 
+ { + Value: &NameInField{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // Test attributes + { + Value: &AttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &AttrTest{Bytes: []byte{}}, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{}, + ExpectXML: ``, + }, + + // pointer fields + { + Value: &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr}, + ExpectXML: `lorem ipsum`, + MarshalOnly: true, + }, + + // empty chardata pointer field + { + Value: &ChardataEmptyTest{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // omitempty on fields + { + Value: &OmitFieldTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + Ptr: &PresenceTest{}, + }, + ExpectXML: `` + + `8` + + `9` + + `23.5` + + `255` + + `true` + + `str` + + `byt` + + `` + + ``, + }, + { + Value: &OmitFieldTest{}, + ExpectXML: ``, + }, + + // Test ",any" + { + ExpectXML: `knownunknown`, + Value: &AnyTest{ + Nested: "known", + AnyField: AnyHolder{ + XMLName: Name{Local: "other"}, + XML: "unknown", + }, + }, + }, + { + Value: &AnyTest{Nested: "known", + AnyField: AnyHolder{ + XML: "", + XMLName: Name{Local: "AnyField"}, + }, + }, + ExpectXML: `known`, + }, + { + ExpectXML: `b`, + Value: &AnyOmitTest{ + Nested: "b", + }, + }, + { + ExpectXML: `bei`, + Value: &AnySliceTest{ + Nested: "b", + AnyField: []AnyHolder{ + { + XMLName: Name{Local: "c"}, + XML: "e", + }, + { + XMLName: Name{Space: "f", Local: "g"}, + XML: "i", + }, + }, + }, + }, + { + ExpectXML: `b`, + Value: &AnySliceTest{ + Nested: "b", + }, + }, + + // Test recursive types. + { + Value: &RecurseA{ + A: "a1", + B: &RecurseB{ + A: &RecurseA{"a2", nil}, + B: "b1", + }, + }, + ExpectXML: `a1a2b1`, + }, + + // Test ignoring fields via "-" tag + { + ExpectXML: ``, + Value: &IgnoreTest{}, + }, + { + ExpectXML: ``, + Value: &IgnoreTest{PublicSecret: "can't tell"}, + MarshalOnly: true, + }, + { + ExpectXML: `ignore me`, + Value: &IgnoreTest{}, + UnmarshalOnly: true, + }, + + // Test escaping. + { + ExpectXML: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + Value: &AnyTest{ + Nested: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + AnyField: AnyHolder{XMLName: Name{Local: "empty"}}, + }, + }, + { + ExpectXML: `newline: ; cr: ; tab: ;`, + Value: &AnyTest{ + Nested: "newline: \n; cr: \r; tab: \t;", + AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}}, + }, + }, + { + ExpectXML: "1\r2\r\n3\n\r4\n5", + Value: &AnyTest{ + Nested: "1\n2\n3\n\n4\n5", + }, + UnmarshalOnly: true, + }, + { + ExpectXML: `42`, + Value: &EmbedInt{ + MyInt: 42, + }, + }, + // Test omitempty with parent chain; see golang.org/issue/4168. + { + ExpectXML: ``, + Value: &Strings{}, + }, + // Custom marshalers. 
+ { + ExpectXML: `hello world`, + Value: &MyMarshalerTest{}, + }, + { + ExpectXML: ``, + Value: &MarshalerStruct{}, + }, + { + ExpectXML: ``, + Value: &MarshalerValueStruct{}, + }, + { + ExpectXML: ``, + Value: &OuterStruct{IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterOuterStruct{OuterStruct{IntAttr: 10}}, + }, + { + ExpectXML: `test`, + Value: &NestedAndChardata{AB: make([]string, 2), Chardata: "test"}, + }, + { + ExpectXML: ``, + Value: &NestedAndComment{AB: make([]string, 2), Comment: "test"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStructWithOmitEmpty{Body: "hello world"}, + }, + { + // The xmlns attribute must be ignored because the + // element is in the empty namespace, so it's not possible + // to set the default namespace to something non-empty. + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStructWithEmptyNamespace{Ns: "foo", Body: "hello world"}, + MarshalOnly: true, + }, + { + ExpectXML: `hello world`, + Value: &RecursiveXMLNSFieldStruct{ + Ns: "foo", + Body: &RecursiveXMLNSFieldStruct{ + Text: "hello world", + }, + }, + }, +} + +func TestMarshal(t *testing.T) { + for idx, test := range marshalTests { + if test.UnmarshalOnly { + continue + } + data, err := Marshal(test.Value) + if err != nil { + t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + if strings.Contains(want, "\n") { + t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want) + } else { + t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want) + } + } + } +} + +type AttrParent struct { + X string `xml:"X>Y,attr"` +} + +type BadAttr struct { + Name []string `xml:"name,attr"` +} + +var marshalErrorTests = []struct { + Value interface{} + Err string + Kind reflect.Kind +}{ + { + Value: make(chan bool), + Err: "xml: unsupported type: chan bool", + Kind: reflect.Chan, + }, + { + Value: map[string]string{ + "question": "What do you get when you multiply six by nine?", + "answer": "42", + }, + Err: "xml: unsupported type: map[string]string", + Kind: reflect.Map, + }, + { + Value: map[*Ship]bool{nil: false}, + Err: "xml: unsupported type: map[*xml.Ship]bool", + Kind: reflect.Map, + }, + { + Value: &Domain{Comment: []byte("f--bar")}, + Err: `xml: comments must not contain "--"`, + }, + // Reject parent chain with attr, never worked; see golang.org/issue/5033. 
+ { + Value: &AttrParent{}, + Err: `xml: X>Y chain not valid with attr flag`, + }, + { + Value: BadAttr{[]string{"X", "Y"}}, + Err: `xml: unsupported type: []string`, + }, +} + +var marshalIndentTests = []struct { + Value interface{} + Prefix string + Indent string + ExpectXML string +}{ + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + Prefix: "", + Indent: "\t", + ExpectXML: fmt.Sprintf("\n\tJames Bond\n"), + }, +} + +func TestMarshalErrors(t *testing.T) { + for idx, test := range marshalErrorTests { + data, err := Marshal(test.Value) + if err == nil { + t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err) + continue + } + if err.Error() != test.Err { + t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err) + } + if test.Kind != reflect.Invalid { + if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind { + t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind) + } + } + } +} + +// Do invertibility testing on the various structures that we test +func TestUnmarshal(t *testing.T) { + for i, test := range marshalTests { + if test.MarshalOnly { + continue + } + if _, ok := test.Value.(*Plain); ok { + continue + } + vt := reflect.TypeOf(test.Value) + dest := reflect.New(vt.Elem()).Interface() + err := Unmarshal([]byte(test.ExpectXML), dest) + + switch fix := dest.(type) { + case *Feed: + fix.Author.InnerXML = "" + for i := range fix.Entry { + fix.Entry[i].Author.InnerXML = "" + } + } + + if err != nil { + t.Errorf("#%d: unexpected error: %#v", i, err) + } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) { + t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want) + } + } +} + +func TestMarshalIndent(t *testing.T) { + for i, test := range marshalIndentTests { + data, err := MarshalIndent(test.Value, test.Prefix, test.Indent) + if err != nil { + t.Errorf("#%d: Error: %s", i, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want) + } + } +} + +type limitedBytesWriter struct { + w io.Writer + remain int // until writes fail +} + +func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) { + if lw.remain <= 0 { + println("error") + return 0, errors.New("write limit hit") + } + if len(p) > lw.remain { + p = p[:lw.remain] + n, _ = lw.w.Write(p) + lw.remain = 0 + return n, errors.New("write limit hit") + } + n, err = lw.w.Write(p) + lw.remain -= n + return n, err +} + +func TestMarshalWriteErrors(t *testing.T) { + var buf bytes.Buffer + const writeCap = 1024 + w := &limitedBytesWriter{&buf, writeCap} + enc := NewEncoder(w) + var err error + var i int + const n = 4000 + for i = 1; i <= n; i++ { + err = enc.Encode(&Passenger{ + Name: []string{"Alice", "Bob"}, + Weight: 5, + }) + if err != nil { + break + } + } + if err == nil { + t.Error("expected an error") + } + if i == n { + t.Errorf("expected to fail before the end") + } + if buf.Len() != writeCap { + t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap) + } +} + +func TestMarshalWriteIOErrors(t *testing.T) { + enc := NewEncoder(errWriter{}) + + expectErr := "unwritable" + err := enc.Encode(&Passenger{}) + if err == nil || err.Error() != expectErr { + t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr) + } +} + +func TestMarshalFlush(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := 
enc.EncodeToken(CharData("hello world")); err != nil { + t.Fatalf("enc.EncodeToken: %v", err) + } + if buf.Len() > 0 { + t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes()) + } + if err := enc.Flush(); err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if buf.String() != "hello world" { + t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), "hello world") + } +} + +var encodeElementTests = []struct { + desc string + value interface{} + start StartElement + expectXML string +}{{ + desc: "simple string", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + }, + expectXML: `hello`, +}, { + desc: "string with added attributes", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "x"}, + Value: "y", + }, { + Name: Name{Local: "foo"}, + Value: "bar", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element with default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element in name space with different default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "XMLMarshaler with start element with default name space", + value: &MyMarshalerTest{}, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello world`, +}} + +func TestEncodeElement(t *testing.T) { + for idx, test := range encodeElementTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + err := enc.EncodeElement(test.value, test.start) + if err != nil { + t.Fatalf("enc.EncodeElement: %v", err) + } + err = enc.Flush() + if err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if got, want := buf.String(), test.expectXML; got != want { + t.Errorf("#%d(%s): EncodeElement(%#v, %#v):\nhave %#q\nwant %#q", idx, test.desc, test.value, test.start, got, want) + } + } +} + +func BenchmarkMarshal(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + Marshal(atomValue) + } +} + +func BenchmarkUnmarshal(b *testing.B) { + b.ReportAllocs() + xml := []byte(atomXml) + for i := 0; i < b.N; i++ { + Unmarshal(xml, &Feed{}) + } +} + +// golang.org/issue/6556 +func TestStructPointerMarshal(t *testing.T) { + type A struct { + XMLName string `xml:"a"` + B []interface{} + } + type C struct { + XMLName Name + Value string `xml:"value"` + } + + a := new(A) + a.B = append(a.B, &C{ + XMLName: Name{Local: "c"}, + Value: "x", + }) + + b, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + if x := string(b); x != "x" { + t.Fatal(x) + } + var v A + err = Unmarshal(b, &v) + if err != nil { + t.Fatal(err) + } +} + +var encodeTokenTests = []struct { + desc string + toks []Token + want string + err string +}{{ + desc: "start element with name space", + toks: []Token{ + StartElement{Name{"space", "local"}, nil}, + }, + want: ``, +}, { + desc: "start element with no name", + toks: []Token{ + StartElement{Name{"space", ""}, 
nil}, + }, + err: "xml: start tag with no name", +}, { + desc: "end element with no name", + toks: []Token{ + EndElement{Name{"space", ""}}, + }, + err: "xml: end tag with no name", +}, { + desc: "char data", + toks: []Token{ + CharData("foo"), + }, + want: `foo`, +}, { + desc: "char data with escaped chars", + toks: []Token{ + CharData(" \t\n"), + }, + want: " \n", +}, { + desc: "comment", + toks: []Token{ + Comment("foo"), + }, + want: ``, +}, { + desc: "comment with invalid content", + toks: []Token{ + Comment("foo-->"), + }, + err: "xml: EncodeToken of Comment containing --> marker", +}, { + desc: "proc instruction", + toks: []Token{ + ProcInst{"Target", []byte("Instruction")}, + }, + want: ``, +}, { + desc: "proc instruction with empty target", + toks: []Token{ + ProcInst{"", []byte("Instruction")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "proc instruction with bad content", + toks: []Token{ + ProcInst{"", []byte("Instruction?>")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "directive", + toks: []Token{ + Directive("foo"), + }, + want: ``, +}, { + desc: "more complex directive", + toks: []Token{ + Directive("DOCTYPE doc [ '> ]"), + }, + want: `'> ]>`, +}, { + desc: "directive instruction with bad name", + toks: []Token{ + Directive("foo>"), + }, + err: "xml: EncodeToken of Directive containing wrong < or > markers", +}, { + desc: "end tag without start tag", + toks: []Token{ + EndElement{Name{"foo", "bar"}}, + }, + err: "xml: end tag without start tag", +}, { + desc: "mismatching end tag local name", + toks: []Token{ + StartElement{Name{"", "foo"}, nil}, + EndElement{Name{"", "bar"}}, + }, + err: "xml: end tag does not match start tag ", + want: ``, +}, { + desc: "mismatching end tag namespace", + toks: []Token{ + StartElement{Name{"space", "foo"}, nil}, + EndElement{Name{"another", "foo"}}, + }, + err: "xml: end tag in namespace another does not match start tag in namespace space", + want: ``, +}, { + desc: "start element with explicit namespace", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "start element with explicit namespace and colliding prefix", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + {Name{"x", "bar"}, "other"}, + }}, + }, + want: ``, +}, { + desc: "start element using previously defined namespace", + toks: []Token{ + StartElement{Name{"", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"space", "x"}, "y"}, + }}, + }, + want: ``, +}, { + desc: "nested name space with same prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space1"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space2"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + EndElement{Name{"", "foo"}}, + EndElement{Name{"", "foo"}}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + }, + want: ``, +}, { + desc: "start element defining several prefixes for the same name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "a"}, "space"}, + {Name{"xmlns", "b"}, "space"}, + {Name{"space", "x"}, "value"}, + }}, + 
}, + want: ``, +}, { + desc: "nested element redefines name space", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element creates alias for default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element defines default name space with existing prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element uses empty attribute name space when default ns defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "redefine xmlns", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"foo", "xmlns"}, "space"}, + }}, + }, + err: `xml: cannot redefine xmlns attribute prefix`, +}, { + desc: "xmlns with explicit name space #1", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xml", "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "xmlns with explicit name space #2", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{xmlURL, "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "empty name space declaration is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "foo"}, ""}, + }}, + }, + want: ``, +}, { + desc: "attribute with no name is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", ""}, "value"}, + }}, + }, + want: ``, +}, { + desc: "namespace URL with non-valid name", + toks: []Token{ + StartElement{Name{"/34", "foo"}, []Attr{ + {Name{"/34", "x"}, "value"}, + }}, + }, + want: `<_:foo xmlns:_="/34" _:x="value">`, +}, { + desc: "nested element resets default namespace to empty", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, ""}, + {Name{"", "x"}, "value"}, + {Name{"space", "x"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element requires empty default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, nil}, + }, + want: ``, +}, { + desc: "attribute uses name space from xmlns", + toks: []Token{ + StartElement{Name{"some/space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + {Name{"some/space", "other"}, "other value"}, + }}, + }, + want: ``, +}, { + desc: "default name space should not be used by attributes", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"xmlns", "bar"}, "space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "default name space not used by attributes, not explicitly defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, 
"space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "impossible xmlns declaration", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "bar"}, []Attr{ + {Name{"space", "attr"}, "value"}, + }}, + }, + want: ``, +}} + +func TestEncodeToken(t *testing.T) { +loop: + for i, tt := range encodeTokenTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + var err error + for j, tok := range tt.toks { + err = enc.EncodeToken(tok) + if err != nil && j < len(tt.toks)-1 { + t.Errorf("#%d %s token #%d: %v", i, tt.desc, j, err) + continue loop + } + } + errorf := func(f string, a ...interface{}) { + t.Errorf("#%d %s token #%d:%s", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...)) + } + switch { + case tt.err != "" && err == nil: + errorf(" expected error; got none") + continue + case tt.err == "" && err != nil: + errorf(" got error: %v", err) + continue + case tt.err != "" && err != nil && tt.err != err.Error(): + errorf(" error mismatch; got %v, want %v", err, tt.err) + continue + } + if err := enc.Flush(); err != nil { + errorf(" %v", err) + continue + } + if got := buf.String(); got != tt.want { + errorf("\ngot %v\nwant %v", got, tt.want) + continue + } + } +} + +func TestProcInstEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err) + } + + if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst") + } + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil { + t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token") + } +} + +func TestDecodeEncode(t *testing.T) { + var in, out bytes.Buffer + in.WriteString(` + + + +`) + dec := NewDecoder(&in) + enc := NewEncoder(&out) + for tok, err := dec.Token(); err == nil; tok, err = dec.Token() { + err = enc.EncodeToken(tok) + if err != nil { + t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err) + } + } +} + +// Issue 9796. Used to fail with GORACE="halt_on_error=1" -race. +func TestRace9796(t *testing.T) { + type A struct{} + type B struct { + C []A `xml:"X>Y"` + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + Marshal(B{[]A{A{}}}) + wg.Done() + }() + } + wg.Wait() +} + +func TestIsValidDirective(t *testing.T) { + testOK := []string{ + "<>", + "< < > >", + "' '>' >", + " ]>", + " '<' ' doc ANY> ]>", + ">>> a < comment --> [ ] >", + } + testKO := []string{ + "<", + ">", + "", + "< > > < < >", + " -->", + "", + "'", + "", + } + for _, s := range testOK { + if !isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be valid", s) + } + } + for _, s := range testKO { + if isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be invalid", s) + } + } +} + +// Issue 11719. EncodeToken used to silently eat tokens with an invalid type. 
+func TestSimpleUseOfEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := enc.EncodeToken(&StartElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(&EndElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(StartElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: StartElement %s", err) + } + if err := enc.EncodeToken(EndElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: EndElement %s", err) + } + if err := enc.EncodeToken(Universe{}); err == nil { + t.Errorf("enc.EncodeToken: invalid type not caught") + } + if err := enc.Flush(); err != nil { + t.Errorf("enc.Flush: %s", err) + } + if buf.Len() == 0 { + t.Errorf("enc.EncodeToken: empty buffer") + } + want := "" + if buf.String() != want { + t.Errorf("enc.EncodeToken: expected %q; got %q", want, buf.String()) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read.go b/vendor/golang.org/x/net/webdav/internal/xml/read.go new file mode 100644 index 00000000000..75b9f2ba1b2 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read.go @@ -0,0 +1,692 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +// BUG(rsc): Mapping between XML elements and data structures is inherently flawed: +// an XML element is an order-dependent collection of anonymous +// values, while a data structure is an order-independent collection +// of named values. +// See package json for a textual representation more suitable +// to data structures. + +// Unmarshal parses the XML-encoded data and stores the result in +// the value pointed to by v, which must be an arbitrary struct, +// slice, or string. Well-formed data that does not fit into v is +// discarded. +// +// Because Unmarshal uses the reflect package, it can only assign +// to exported (upper case) fields. Unmarshal uses a case-sensitive +// comparison to match XML element names to tag values and struct +// field names. +// +// Unmarshal maps an XML element to a struct using the following rules. +// In the rules, the tag of a field refers to the value associated with the +// key 'xml' in the struct field's tag (see the example above). +// +// * If the struct has a field of type []byte or string with tag +// ",innerxml", Unmarshal accumulates the raw XML nested inside the +// element in that field. The rest of the rules still apply. +// +// * If the struct has a field named XMLName of type xml.Name, +// Unmarshal records the element name in that field. +// +// * If the XMLName field has an associated tag of the form +// "name" or "namespace-URL name", the XML element must have +// the given name (and, optionally, name space) or else Unmarshal +// returns an error. +// +// * If the XML element has an attribute whose name matches a +// struct field name with an associated tag containing ",attr" or +// the explicit name in a struct field tag of the form "name,attr", +// Unmarshal records the attribute value in that field. +// +// * If the XML element contains character data, that data is +// accumulated in the first struct field that has tag ",chardata". 
+// The struct field may have type []byte or string. +// If there is no such field, the character data is discarded. +// +// * If the XML element contains comments, they are accumulated in +// the first struct field that has tag ",comment". The struct +// field may have type []byte or string. If there is no such +// field, the comments are discarded. +// +// * If the XML element contains a sub-element whose name matches +// the prefix of a tag formatted as "a" or "a>b>c", unmarshal +// will descend into the XML structure looking for elements with the +// given names, and will map the innermost elements to that struct +// field. A tag starting with ">" is equivalent to one starting +// with the field name followed by ">". +// +// * If the XML element contains a sub-element whose name matches +// a struct field's XMLName tag and the struct field has no +// explicit name tag as per the previous rule, unmarshal maps +// the sub-element to that struct field. +// +// * If the XML element contains a sub-element whose name matches a +// field without any mode flags (",attr", ",chardata", etc), Unmarshal +// maps the sub-element to that struct field. +// +// * If the XML element contains a sub-element that hasn't matched any +// of the above rules and the struct has a field with tag ",any", +// unmarshal maps the sub-element to that struct field. +// +// * An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// * A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. +// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. +// +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. 
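To make the mapping rules above concrete, here is a minimal sketch against hypothetical types; the address, city, and zip names are illustrative only and not part of this package:

// address is a made-up type used only to illustrate the rules documented above.
type address struct {
	City string `xml:"city"`     // matches the <city> child element
	Zip  string `xml:"zip,attr"` // matches the zip attribute of the element
}

func exampleUnmarshal() (address, error) {
	var a address
	data := []byte(`<address zip="12345"><city>Springfield</city></address>`)
	err := Unmarshal(data, &a) // on success: a.City == "Springfield", a.Zip == "12345"
	return a, err
}

Fields without the ",attr" flag match child elements by their tag name, while ",attr" fields match attributes of the element itself, per the rules listed above.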
+type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. +type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". +func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. +func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. +func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. 
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Unmarshal a single XML element into val. +func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. + if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. 
+ if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. + if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. + for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) 
+ } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. +func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. + return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. 
+ parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. +func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read_test.go b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go new file mode 100644 index 00000000000..02f1e10c330 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go @@ -0,0 +1,744 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + "testing" + "time" +) + +// Stripped down Atom feed data structures. + +func TestUnmarshalFeed(t *testing.T) { + var f Feed + if err := Unmarshal([]byte(atomFeedString), &f); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(f, atomFeed) { + t.Fatalf("have %#v\nwant %#v", f, atomFeed) + } +} + +// hget http://codereview.appspot.com/rss/mine/rsc +const atomFeedString = ` + +Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub +2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&#39;t quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed&#39;s actual URL in +the link rel=&quot;self&quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) 
+ +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +rietveld: correct tab handling +2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + +type Feed struct { + XMLName Name `xml:"http://www.w3.org/2005/Atom feed"` + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated,attr"` + Author Person `xml:"author"` + Entry []Entry `xml:"entry"` +} + +type Entry struct { + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated"` + Author Person `xml:"author"` + Summary Text `xml:"summary"` +} + +type Link struct { + Rel string `xml:"rel,attr,omitempty"` + Href string `xml:"href,attr"` +} + +type Person struct { + Name string `xml:"name"` + URI string `xml:"uri"` + Email string `xml:"email"` + InnerXML string `xml:",innerxml"` +} + +type Text struct { + Type string `xml:"type,attr,omitempty"` + Body string `xml:",chardata"` +} + +var atomFeed = Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Code Review - My issues", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/"}, + {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"}, + }, + Id: "http://codereview.appspot.com/", + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "rietveld<>", + InnerXML: "rietveld<>", + }, + Entry: []Entry{ + { + Title: "rietveld: an attempt at pubsubhubbub\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/126085"}, + }, + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a", + Summary: Text{ + Type: "html", + Body: ` + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a <link rel="hub" href="hub-server"> tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can't quite get the server to work, but I think the bug +is not in my code. 
I think that the server expects to be +able to grab the feed and see the feed's actual URL in +the link rel="self", but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +`, + }, + }, + { + Title: "rietveld: correct tab handling\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/124106"}, + }, + Updated: ParseTime("2009-10-03T23:02:17+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a", + Summary: Text{ + Type: "html", + Body: ` + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn't know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + +`, + }, + }, + }, +} + +const pathTestString = ` + + 1 + + + A + + + B + + + C + D + + <_> + E + + + 2 + +` + +type PathTestItem struct { + Value string +} + +type PathTestA struct { + Items []PathTestItem `xml:">Item1"` + Before, After string +} + +type PathTestB struct { + Other []PathTestItem `xml:"Items>Item1"` + Before, After string +} + +type PathTestC struct { + Values1 []string `xml:"Items>Item1>Value"` + Values2 []string `xml:"Items>Item2>Value"` + Before, After string +} + +type PathTestSet struct { + Item1 []PathTestItem +} + +type PathTestD struct { + Other PathTestSet `xml:"Items"` + Before, After string +} + +type PathTestE struct { + Underline string `xml:"Items>_>Value"` + Before, After string +} + +var pathTests = []interface{}{ + &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"}, + &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"}, + &PathTestE{Underline: "E", Before: "1", After: "2"}, +} + +func TestUnmarshalPaths(t *testing.T) { + for _, pt := range pathTests { + v := reflect.New(reflect.TypeOf(pt).Elem()).Interface() + if err := Unmarshal([]byte(pathTestString), v); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(v, pt) { + t.Fatalf("have %#v\nwant %#v", v, pt) + } + } +} + +type BadPathTestA struct { + First string `xml:"items>item1"` + Other string `xml:"items>item2"` + Second string `xml:"items"` +} + +type BadPathTestB struct { + Other string `xml:"items>item2>value"` + First string `xml:"items>item1"` + Second string `xml:"items>item1>value"` +} + +type BadPathTestC struct { + First string + Second string `xml:"First"` +} + +type BadPathTestD struct { 
+ BadPathEmbeddedA + BadPathEmbeddedB +} + +type BadPathEmbeddedA struct { + First string +} + +type BadPathEmbeddedB struct { + Second string `xml:"First"` +} + +var badPathTests = []struct { + v, e interface{} +}{ + {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}}, + {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}}, + {&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}}, + {&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}}, +} + +func TestUnmarshalBadPaths(t *testing.T) { + for _, tt := range badPathTests { + err := Unmarshal([]byte(pathTestString), tt.v) + if !reflect.DeepEqual(err, tt.e) { + t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e) + } + } +} + +const OK = "OK" +const withoutNameTypeData = ` + +` + +type TestThree struct { + XMLName Name `xml:"Test3"` + Attr string `xml:",attr"` +} + +func TestUnmarshalWithoutNameType(t *testing.T) { + var x TestThree + if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if x.Attr != OK { + t.Fatalf("have %v\nwant %v", x.Attr, OK) + } +} + +func TestUnmarshalAttr(t *testing.T) { + type ParamVal struct { + Int int `xml:"int,attr"` + } + + type ParamPtr struct { + Int *int `xml:"int,attr"` + } + + type ParamStringPtr struct { + Int *string `xml:"int,attr"` + } + + x := []byte(``) + + p1 := &ParamPtr{} + if err := Unmarshal(x, p1); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p1.Int == nil { + t.Fatalf("Unmarshal failed in to *int field") + } else if *p1.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1) + } + + p2 := &ParamVal{} + if err := Unmarshal(x, p2); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p2.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1) + } + + p3 := &ParamStringPtr{} + if err := Unmarshal(x, p3); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p3.Int == nil { + t.Fatalf("Unmarshal failed in to *string field") + } else if *p3.Int != "1" { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1) + } +} + +type Tables struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table"` + FTable string `xml:"http://www.w3schools.com/furniture table"` +} + +var tables = []struct { + xml string + tab Tables + ns string +}{ + { + xml: `` + + `
    hello
    ` + + `world
    ` + + ``, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world
    ` + + `hello
    ` + + `
    `, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world` + + `hello` + + ``, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `bogus
    ` + + `
    `, + tab: Tables{}, + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{HTable: "only"}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{FTable: "only"}, + ns: "http://www.w3schools.com/furniture", + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNS(t *testing.T) { + for i, tt := range tables { + var dst Tables + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestRoundTrip(t *testing.T) { + // From issue 7535 + const s = `` + in := bytes.NewBufferString(s) + for i := 0; i < 10; i++ { + out := &bytes.Buffer{} + d := NewDecoder(in) + e := NewEncoder(out) + + for { + t, err := d.Token() + if err == io.EOF { + break + } + if err != nil { + fmt.Println("failed:", err) + return + } + e.EncodeToken(t) + } + e.Flush() + in = out + } + if got := in.String(); got != s { + t.Errorf("have: %q\nwant: %q\n", got, s) + } +} + +func TestMarshalNS(t *testing.T) { + dst := Tables{"hello", "world"} + data, err := Marshal(&dst) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `hello
    world
    ` + str := string(data) + if str != want { + t.Errorf("have: %q\nwant: %q\n", str, want) + } +} + +type TableAttrs struct { + TAttr TAttr +} + +type TAttr struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"` + FTable string `xml:"http://www.w3schools.com/furniture table,attr"` + Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"` + Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"` + Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"` + Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"` + Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"` +} + +var tableAttrs = []struct { + xml string + tab TableAttrs + ns string +}{ + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + ns: "http://www.w3schools.com/furniture", + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: ``, + tab: TableAttrs{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNSAttr(t *testing.T) { + for i, tt := range tableAttrs { + var dst TableAttrs + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestMarshalNSAttr(t *testing.T) { + src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}} + data, err := Marshal(&src) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `` + str := string(data) + if str != want { + t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want) + } + + var dst TableAttrs + if err := Unmarshal(data, &dst); err != nil { + t.Errorf("Unmarshal: %v", err) + } + + if dst != src { + t.Errorf("Unmarshal = %q, want %q", dst, src) + } +} + +type MyCharData struct { + body string +} + +func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error { + for { + t, err := d.Token() + if err == io.EOF { // found end of element + break + } + if err != nil { + return err + } + if char, ok := t.(CharData); ok { + m.body += string(char) + } + } + return nil +} + +var _ Unmarshaler = (*MyCharData)(nil) + +func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error { + panic("must not call") +} + +type MyAttr struct { + attr string +} + +func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error { + m.attr = attr.Value + return nil +} + +var _ UnmarshalerAttr = (*MyAttr)(nil) + +type MyStruct struct { + Data *MyCharData + Attr *MyAttr `xml:",attr"` + + Data2 MyCharData + Attr2 MyAttr `xml:",attr"` +} + +func TestUnmarshaler(t *testing.T) { + xml := ` + + hello world + howdy world + + ` + + var m MyStruct + if err := 
Unmarshal([]byte(xml), &m); err != nil { + t.Fatal(err) + } + + if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" { + t.Errorf("m=%#+v\n", m) + } +} + +type Pea struct { + Cotelydon string +} + +type Pod struct { + Pea interface{} `xml:"Pea"` +} + +// https://golang.org/issue/6836 +func TestUnmarshalIntoInterface(t *testing.T) { + pod := new(Pod) + pod.Pea = new(Pea) + xml := `Green stuff` + err := Unmarshal([]byte(xml), pod) + if err != nil { + t.Fatalf("failed to unmarshal %q: %v", xml, err) + } + pea, ok := pod.Pea.(*Pea) + if !ok { + t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea) + } + have, want := pea.Cotelydon, "Green stuff" + if have != want { + t.Errorf("failed to unmarshal into interface, have %q want %q", have, want) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go new file mode 100644 index 00000000000..c9a6421f280 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go @@ -0,0 +1,371 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. +func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) + if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. 
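getTypeInfo above flattens anonymous struct fields into the outer type. A small hypothetical sketch of what that means for tagged structs (Header and Document are made-up names):

// Header's fields are promoted into Document's XML mapping because the
// field is anonymous (embedded) and exported.
type Header struct {
	ID string `xml:"id,attr"`
}

type Document struct {
	Header        // embedded: its ID field behaves as a field of Document
	Body   string `xml:",chardata"`
}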
+func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. + tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. + if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. + if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + if finfo.xmlns == "" && finfo.flags&fAttr == 0 { + // If it's an element no namespace specified, get the default + // from the XMLName of enclosing struct if possible. + if xmlname := lookupXMLName(typ); xmlname != nil { + finfo.xmlns = xmlname.xmlns + } + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. + if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. 
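structFieldInfo parses the tag grammar used throughout this package. As a compact reference, a hypothetical struct exercising the main tag forms it accepts (all names here are illustrative):

type taggedExample struct {
	XMLName Name   `xml:"http://example.com/ns result"` // optional name space, then element name
	ID      string `xml:"id,attr"`                      // attribute
	Title   string `xml:"meta>title"`                   // nested element path
	Body    string `xml:",chardata"`                    // character data of the element
	Raw     string `xml:",innerxml"`                    // raw XML nested inside the element
	Note    string `xml:"note,omitempty"`               // element, omitted when empty on marshal
	Hidden  string `xml:"-"`                            // never marshalled or unmarshalled
}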
+func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. + for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. + for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. +type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. 
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/vendor/golang.org/x/net/webdav/internal/xml/xml.go new file mode 100644 index 00000000000..ffab4a70c9d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/xml.go @@ -0,0 +1,1998 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated with a name space +// identifier (Space). In tokens returned by Decoder.Token, the Space +// identifier is given as a canonical URL, not the short prefix used in +// the document being parsed. +// +// As a special case, XML namespace declarations will use the literal +// string "xmlns" for the Space field instead of the fully resolved URL. +// See Encoder.EncodeToken for more information on namespace encoding +// behaviour. +type Name struct { + Space, Local string +} + +// isNamespace reports whether the name is a namespace-defining name. +func (name Name) isNamespace() bool { + return name.Local == "xmlns" || name.Space == "xmlns" +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. +type StartElement struct { + Name Name + Attr []Attr +} + +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// setDefaultNamespace sets the namespace of the element +// as the default for all elements contained within it. +func (e *StartElement) setDefaultNamespace() { + if e.Name.Space == "" { + // If there's no namespace on the element, don't + // set the default. Strictly speaking this might be wrong, as + // we can't tell if the element had no namespace set + // or was just using the default namespace. + return + } + // Don't add a default name space if there's already one set. 
+ for _, attr := range e.Attr { + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + return + } + } + e.Attr = append(e.Attr, Attr{ + Name: Name{ + Local: "xmlns", + }, + Value: e.Name.Space, + }) +} + +// An EndElement represents an XML end element. +type EndElement struct { + Name Name +} + +// A CharData represents XML character data (raw text), +// in which XML escape sequences have been replaced by +// the characters they represent. +type CharData []byte + +func makeCopy(b []byte) []byte { + b1 := make([]byte, len(b)) + copy(b1, b) + return b1 +} + +func (c CharData) Copy() CharData { return CharData(makeCopy(c)) } + +// A Comment represents an XML comment of the form . +// The bytes do not include the comment markers. +type Comment []byte + +func (c Comment) Copy() Comment { return Comment(makeCopy(c)) } + +// A ProcInst represents an XML processing instruction of the form +type ProcInst struct { + Target string + Inst []byte +} + +func (p ProcInst) Copy() ProcInst { + p.Inst = makeCopy(p.Inst) + return p +} + +// A Directive represents an XML directive of the form . +// The bytes do not include the markers. +type Directive []byte + +func (d Directive) Copy() Directive { return Directive(makeCopy(d)) } + +// CopyToken returns a copy of a Token. +func CopyToken(t Token) Token { + switch v := t.(type) { + case CharData: + return v.Copy() + case Comment: + return v.Copy() + case Directive: + return v.Copy() + case ProcInst: + return v.Copy() + case StartElement: + return v.Copy() + } + return t +} + +// A Decoder represents an XML parser reading a particular input stream. +// The parser assumes that its input is encoded in UTF-8. +type Decoder struct { + // Strict defaults to true, enforcing the requirements + // of the XML specification. + // If set to false, the parser allows input containing common + // mistakes: + // * If an element is missing an end tag, the parser invents + // end tags as necessary to keep the return values from Token + // properly balanced. + // * In attribute values and character data, unknown or malformed + // character entities (sequences beginning with &) are left alone. + // + // Setting: + // + // d.Strict = false; + // d.AutoClose = HTMLAutoClose; + // d.Entity = HTMLEntity + // + // creates a parser that can handle typical HTML. + // + // Strict mode does not enforce the requirements of the XML name spaces TR. + // In particular it does not reject name space tags using undefined prefixes. + // Such tags are recorded with the unknown prefix as the name space URL. + Strict bool + + // When Strict == false, AutoClose indicates a set of elements to + // consider closed immediately after they are opened, regardless + // of whether an end element is present. + AutoClose []string + + // Entity can be used to map non-standard entity names to string replacements. + // The parser behaves as if these standard mappings are present in the map, + // regardless of the actual map content: + // + // "lt": "<", + // "gt": ">", + // "amp": "&", + // "apos": "'", + // "quot": `"`, + Entity map[string]string + + // CharsetReader, if non-nil, defines a function to generate + // charset-conversion readers, converting from the provided + // non-UTF-8 charset into UTF-8. If CharsetReader is nil or + // returns an error, parsing stops with an error. One of the + // the CharsetReader's result values must be non-nil. 
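A rough sketch of the lenient configuration that the Strict comment above suggests for HTML-like input; HTMLAutoClose and HTMLEntity are the helper tables that comment refers to, and the sample input is arbitrary:

// lenientDecoder builds a parser that tolerates missing end tags and
// unknown entities, as described in the Strict field documentation above.
func lenientDecoder() *Decoder {
	d := NewDecoder(strings.NewReader(`<p>unterminated paragraph &nbsp; text`))
	d.Strict = false
	d.AutoClose = HTMLAutoClose
	d.Entity = HTMLEntity
	return d
}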
+ CharsetReader func(charset string, input io.Reader) (io.Reader, error) + + // DefaultSpace sets the default name space used for unadorned tags, + // as if the entire XML stream were wrapped in an element containing + // the attribute xmlns="DefaultSpace". + DefaultSpace string + + r io.ByteReader + buf bytes.Buffer + saved *bytes.Buffer + stk *stack + free *stack + needClose bool + toClose Name + nextToken Token + nextByte int + ns map[string]string + err error + line int + offset int64 + unmarshalDepth int +} + +// NewDecoder creates a new XML parser reading from r. +// If r does not implement io.ByteReader, NewDecoder will +// do its own buffering. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + ns: make(map[string]string), + nextByte: -1, + line: 1, + Strict: true, + } + d.switchToReader(r) + return d +} + +// Token returns the next XML token in the input stream. +// At the end of the input stream, Token returns nil, io.EOF. +// +// Slices of bytes in the returned token data refer to the +// parser's internal buffer and remain valid only until the next +// call to Token. To acquire a copy of the bytes, call CopyToken +// or the token's Copy method. +// +// Token expands self-closing elements such as
    +// into separate start and end elements returned by successive calls. +// +// Token guarantees that the StartElement and EndElement +// tokens it returns are properly nested and matched: +// if Token encounters an unexpected end element, +// it will return an error. +// +// Token implements XML name spaces as described by +// http://www.w3.org/TR/REC-xml-names/. Each of the +// Name structures contained in the Token has the Space +// set to the URL identifying its name space when known. +// If Token encounters an unrecognized name space prefix, +// it uses the prefix as the Space rather than report an error. +func (d *Decoder) Token() (t Token, err error) { + if d.stk != nil && d.stk.kind == stkEOF { + err = io.EOF + return + } + if d.nextToken != nil { + t = d.nextToken + d.nextToken = nil + } else if t, err = d.rawToken(); err != nil { + return + } + + if !d.Strict { + if t1, ok := d.autoClose(t); ok { + d.nextToken = t + t = t1 + } + } + switch t1 := t.(type) { + case StartElement: + // In XML name spaces, the translations listed in the + // attributes apply to the element name and + // to the other attribute names, so process + // the translations first. + for _, a := range t1.Attr { + if a.Name.Space == "xmlns" { + v, ok := d.ns[a.Name.Local] + d.pushNs(a.Name.Local, v, ok) + d.ns[a.Name.Local] = a.Value + } + if a.Name.Space == "" && a.Name.Local == "xmlns" { + // Default space for untagged names + v, ok := d.ns[""] + d.pushNs("", v, ok) + d.ns[""] = a.Value + } + } + + d.translate(&t1.Name, true) + for i := range t1.Attr { + d.translate(&t1.Attr[i].Name, false) + } + d.pushElement(t1.Name) + t = t1 + + case EndElement: + d.translate(&t1.Name, true) + if !d.popElement(&t1) { + return nil, d.err + } + t = t1 + } + return +} + +const xmlURL = "http://www.w3.org/XML/1998/namespace" + +// Apply name space translation to name n. +// The default name space (for Space=="") +// applies only to element names, not to attribute names. +func (d *Decoder) translate(n *Name, isElementName bool) { + switch { + case n.Space == "xmlns": + return + case n.Space == "" && !isElementName: + return + case n.Space == "xml": + n.Space = xmlURL + case n.Space == "" && n.Local == "xmlns": + return + } + if v, ok := d.ns[n.Space]; ok { + n.Space = v + } else if n.Space == "" { + n.Space = d.DefaultSpace + } +} + +func (d *Decoder) switchToReader(r io.Reader) { + // Get efficient byte at a time reader. + // Assume that if reader has its own + // ReadByte, it's efficient enough. + // Otherwise, use bufio. + if rb, ok := r.(io.ByteReader); ok { + d.r = rb + } else { + d.r = bufio.NewReader(r) + } +} + +// Parsing state - stack holds old name space translations +// and the current set of open elements. The translations to pop when +// ending a given tag are *below* it on the stack, which is +// more work but forced on us by XML. +type stack struct { + next *stack + kind int + name Name + ok bool +} + +const ( + stkStart = iota + stkNs + stkEOF +) + +func (d *Decoder) push(kind int) *stack { + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.next = d.stk + s.kind = kind + d.stk = s + return s +} + +func (d *Decoder) pop() *stack { + s := d.stk + if s != nil { + d.stk = s.next + s.next = d.free + d.free = s + } + return s +} + +// Record that after the current element is finished +// (that element is already pushed on the stack) +// Token should return EOF until popEOF is called. +func (d *Decoder) pushEOF() { + // Walk down stack to find Start. 
+ // It might not be the top, because there might be stkNs + // entries above it. + start := d.stk + for start.kind != stkStart { + start = start.next + } + // The stkNs entries below a start are associated with that + // element too; skip over them. + for start.next != nil && start.next.kind == stkNs { + start = start.next + } + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.kind = stkEOF + s.next = start.next + start.next = s +} + +// Undo a pushEOF. +// The element must have been finished, so the EOF should be at the top of the stack. +func (d *Decoder) popEOF() bool { + if d.stk == nil || d.stk.kind != stkEOF { + return false + } + d.pop() + return true +} + +// Record that we are starting an element with the given name. +func (d *Decoder) pushElement(name Name) { + s := d.push(stkStart) + s.name = name +} + +// Record that we are changing the value of ns[local]. +// The old value is url, ok. +func (d *Decoder) pushNs(local string, url string, ok bool) { + s := d.push(stkNs) + s.name.Local = local + s.name.Space = url + s.ok = ok +} + +// Creates a SyntaxError with the current line number. +func (d *Decoder) syntaxError(msg string) error { + return &SyntaxError{Msg: msg, Line: d.line} +} + +// Record that we are ending an element with the given name. +// The name must match the record at the top of the stack, +// which must be a pushElement record. +// After popping the element, apply any undo records from +// the stack to restore the name translations that existed +// before we saw this element. +func (d *Decoder) popElement(t *EndElement) bool { + s := d.pop() + name := t.Name + switch { + case s == nil || s.kind != stkStart: + d.err = d.syntaxError("unexpected end element ") + return false + case s.name.Local != name.Local: + if !d.Strict { + d.needClose = true + d.toClose = t.Name + t.Name = s.name + return true + } + d.err = d.syntaxError("element <" + s.name.Local + "> closed by ") + return false + case s.name.Space != name.Space: + d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + + "closed by in space " + name.Space) + return false + } + + // Pop stack until a Start or EOF is on the top, undoing the + // translations that were associated with the element we just closed. + for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { + s := d.pop() + if s.ok { + d.ns[s.name.Local] = s.name.Space + } else { + delete(d.ns, s.name.Local) + } + } + + return true +} + +// If the top element on the stack is autoclosing and +// t is not the end tag, invent the end tag. +func (d *Decoder) autoClose(t Token) (Token, bool) { + if d.stk == nil || d.stk.kind != stkStart { + return nil, false + } + name := strings.ToLower(d.stk.name.Local) + for _, s := range d.AutoClose { + if strings.ToLower(s) == name { + // This one should be auto closed if t doesn't close it. + et, ok := t.(EndElement) + if !ok || et.Name.Local != name { + return EndElement{d.stk.name}, true + } + break + } + } + return nil, false +} + +var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") + +// RawToken is like Token but does not verify that +// start and end elements match and does not translate +// name space prefixes to their corresponding URLs. 
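The Token and RawToken doc comments above describe a pull-style tokenizer. The following standalone sketch shows how a caller typically drives it; it is illustrative only and assumes the standard library encoding/xml entry points, which this vendored copy mirrors (the import path and the sample input are placeholders).

package main

import (
	"encoding/xml" // assumed stand-in for the vendored decoder shown above
	"fmt"
	"io"
	"strings"
)

func main() {
	d := xml.NewDecoder(strings.NewReader(`<a xmlns="urn:x"><b lang="en">hi</b></a>`))
	for {
		tok, err := d.Token()
		if err == io.EOF {
			break // end of input
		}
		if err != nil {
			fmt.Println("parse error:", err)
			return
		}
		switch t := tok.(type) {
		case xml.StartElement:
			// With xmlns="urn:x" above, Space is the resolved namespace URL.
			fmt.Println("start:", t.Name.Space, t.Name.Local)
		case xml.EndElement:
			fmt.Println("end:", t.Name.Local)
		case xml.CharData:
			// Bytes are only valid until the next Token call; copy if kept.
			fmt.Printf("text: %q\n", string(t))
		}
	}
}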
+func (d *Decoder) RawToken() (Token, error) { + if d.unmarshalDepth > 0 { + return nil, errRawToken + } + return d.rawToken() +} + +func (d *Decoder) rawToken() (Token, error) { + if d.err != nil { + return nil, d.err + } + if d.needClose { + // The last element we read was self-closing and + // we returned just the StartElement half. + // Return the EndElement half now. + d.needClose = false + return EndElement{d.toClose}, nil + } + + b, ok := d.getc() + if !ok { + return nil, d.err + } + + if b != '<' { + // Text section. + d.ungetc(b) + data := d.text(-1, false) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + switch b { + case '/': + // ' { + d.err = d.syntaxError("invalid characters between ") + return nil, d.err + } + return EndElement{name}, nil + + case '?': + // ' { + break + } + b0 = b + } + data := d.buf.Bytes() + data = data[0 : len(data)-2] // chop ?> + + if target == "xml" { + content := string(data) + ver := procInst("version", content) + if ver != "" && ver != "1.0" { + d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) + return nil, d.err + } + enc := procInst("encoding", content) + if enc != "" && enc != "utf-8" && enc != "UTF-8" { + if d.CharsetReader == nil { + d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) + return nil, d.err + } + newr, err := d.CharsetReader(enc, d.r.(io.Reader)) + if err != nil { + d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err) + return nil, d.err + } + if newr == nil { + panic("CharsetReader returned a nil Reader for charset " + enc) + } + d.switchToReader(newr) + } + } + return ProcInst{target, data}, nil + + case '!': + // ' { + break + } + b0, b1 = b1, b + } + data := d.buf.Bytes() + data = data[0 : len(data)-3] // chop --> + return Comment(data), nil + + case '[': // . + data := d.text(-1, true) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + // Probably a directive: , , etc. + // We don't care, but accumulate for caller. Quoted angle + // brackets do not count for nesting. 
+ d.buf.Reset() + d.buf.WriteByte(b) + inquote := uint8(0) + depth := 0 + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if inquote == 0 && b == '>' && depth == 0 { + break + } + HandleB: + d.buf.WriteByte(b) + switch { + case b == inquote: + inquote = 0 + + case inquote != 0: + // in quotes, no special action + + case b == '\'' || b == '"': + inquote = b + + case b == '>' && inquote == 0: + depth-- + + case b == '<' && inquote == 0: + // Look for ` + +var testEntity = map[string]string{"何": "What", "is-it": "is it?"} + +var rawTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"", "hello"}}, + CharData("\n "), + StartElement{Name{"", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"", "query"}}, + CharData("\n "), + StartElement{Name{"", "goodbye"}, []Attr{}}, + EndElement{Name{"", "goodbye"}}, + CharData("\n "), + StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"", "inner"}, []Attr{}}, + EndElement{Name{"", "inner"}}, + CharData("\n "), + EndElement{Name{"", "outer"}}, + CharData("\n "), + StartElement{Name{"tag", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"tag", "name"}}, + CharData("\n"), + EndElement{Name{"", "body"}}, + Comment(" missing final newline "), +} + +var cookedTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"ns2", "hello"}}, + CharData("\n "), + StartElement{Name{"ns2", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"ns2", "query"}}, + CharData("\n "), + StartElement{Name{"ns2", "goodbye"}, []Attr{}}, + EndElement{Name{"ns2", "goodbye"}}, + CharData("\n "), + StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"ns2", "inner"}, []Attr{}}, + EndElement{Name{"ns2", "inner"}}, + CharData("\n "), + EndElement{Name{"ns2", "outer"}}, + CharData("\n "), + StartElement{Name{"ns3", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"ns3", "name"}}, + CharData("\n"), + EndElement{Name{"ns2", "body"}}, + Comment(" missing final newline "), +} + +const testInputAltEncoding = ` + +VALUE` + +var rawTokensAltEncoding = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("value"), + EndElement{Name{"", "tag"}}, +} + +var xmlInput = []string{ + // 
unexpected EOF cases + "<", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "", + "", + " c;", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "cdata]]>", +} + +func TestRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + testRawToken(t, d, testInput, rawTokens) +} + +const nonStrictInput = ` +non&entity +&unknown;entity +{ +&#zzz; +&なまえ3; +<-gt; +&; +&0a; +` + +var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"} + +var nonStrictTokens = []Token{ + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("non&entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&unknown;entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("{"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&#zzz;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&なまえ3;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("<-gt;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&0a;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), +} + +func TestNonStrictRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(nonStrictInput)) + d.Strict = false + testRawToken(t, d, nonStrictInput, nonStrictTokens) +} + +type downCaser struct { + t *testing.T + r io.ByteReader +} + +func (d *downCaser) ReadByte() (c byte, err error) { + c, err = d.r.ReadByte() + if c >= 'A' && c <= 'Z' { + c += 'a' - 'A' + } + return +} + +func (d *downCaser) Read(p []byte) (int, error) { + d.t.Fatalf("unexpected Read call on downCaser reader") + panic("unreachable") +} + +func TestRawTokenAltEncoding(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) { + if charset != "x-testing-uppercase" { + t.Fatalf("unexpected charset %q", charset) + } + return &downCaser{t, input.(io.ByteReader)}, nil + } + testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding) +} + +func TestRawTokenAltEncodingNoConverter(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + token, err := d.RawToken() + if token == nil { + t.Fatalf("expected a token on first RawToken call") + } + if err != nil { + t.Fatal(err) + } + token, err = d.RawToken() + if token != nil { + t.Errorf("expected a nil token; got %#v", token) + } + if err == nil { + t.Fatalf("expected an error on second RawToken call") + } + const encoding = "x-testing-uppercase" + if !strings.Contains(err.Error(), encoding) { + t.Errorf("expected error to contain %q; got error: %v", + encoding, err) + } +} + +func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) { + lastEnd := int64(0) + for i, want := range rawTokens { + start := d.InputOffset() + have, err := d.RawToken() + end := d.InputOffset() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + var shave, swant string + if _, ok := have.(CharData); ok { + shave = fmt.Sprintf("CharData(%q)", have) + } else { + shave = fmt.Sprintf("%#v", have) + } + if _, ok := want.(CharData); ok { + 
swant = fmt.Sprintf("CharData(%q)", want) + } else { + swant = fmt.Sprintf("%#v", want) + } + t.Errorf("token %d = %s, want %s", i, shave, swant) + } + + // Check that InputOffset returned actual token. + switch { + case start < lastEnd: + t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have) + case start >= end: + // Special case: EndElement can be synthesized. + if start == end && end == lastEnd { + break + } + t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have) + case end > int64(len(raw)): + t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have) + default: + text := raw[start:end] + if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) { + t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have) + } + } + lastEnd = end + } +} + +// Ensure that directives (specifically !DOCTYPE) include the complete +// text of any nested directives, noting that < and > do not change +// nesting depth if they are in single or double quotes. + +var nestedDirectivesInput = ` +]> +">]> +]> +'>]> +]> +'>]> +]> +` + +var nestedDirectivesTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE [">]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestNestedDirectives(t *testing.T) { + d := NewDecoder(strings.NewReader(nestedDirectivesInput)) + + for i, want := range nestedDirectivesTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + + for i, want := range cookedTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestSyntax(t *testing.T) { + for i := range xmlInput { + d := NewDecoder(strings.NewReader(xmlInput[i])) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if _, ok := err.(*SyntaxError); !ok { + t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i]) + } + } +} + +type allScalars struct { + True1 bool + True2 bool + False1 bool + False2 bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint int + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + String string + PtrString *string +} + +var all = allScalars{ + True1: true, + True2: true, + False1: false, + False2: false, + Int: 1, + Int8: -2, + Int16: 3, + Int32: -4, + Int64: 5, + Uint: 6, + Uint8: 7, + Uint16: 8, + Uint32: 9, + Uint64: 10, + Uintptr: 11, + Float32: 13.0, + Float64: 14.0, + String: "15", + PtrString: &sixteen, +} + +var sixteen = "16" + +const testScalarsInput = ` + true + 1 + false + 0 + 1 + -2 + 3 + -4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12.0 + 13.0 + 14.0 + 15 + 16 +` + +func TestAllScalars(t *testing.T) { + var a allScalars + err := Unmarshal([]byte(testScalarsInput), &a) + + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, all) { + t.Errorf("have %+v want 
%+v", a, all) + } +} + +type item struct { + Field_a string +} + +func TestIssue569(t *testing.T) { + data := `abcd` + var i item + err := Unmarshal([]byte(data), &i) + + if err != nil || i.Field_a != "abcd" { + t.Fatal("Expecting abcd") + } +} + +func TestUnquotedAttrs(t *testing.T) { + data := "" + d := NewDecoder(strings.NewReader(data)) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != "tag" { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != "azAZ09:-_" { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != "attr" { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } +} + +func TestValuelessAttrs(t *testing.T) { + tests := [][3]string{ + {"
<p nowrap>", "p", "nowrap"}, + {"<p nowrap >
    ", "p", "nowrap"}, + {"", "input", "checked"}, + {"", "input", "checked"}, + } + for _, test := range tests { + d := NewDecoder(strings.NewReader(test[0])) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != test[1] { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != test[2] { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != test[2] { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } + } +} + +func TestCopyTokenCharData(t *testing.T) { + data := []byte("same data") + var tok1 Token = CharData(data) + tok2 := CopyToken(tok1) + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) != CharData") + } + data[1] = 'o' + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestCopyTokenStartElement(t *testing.T) { + elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}} + var tok1 Token = elt + tok2 := CopyToken(tok1) + if tok1.(StartElement).Attr[0].Value != "en" { + t.Error("CopyToken overwrote Attr[0]") + } + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(StartElement) != StartElement") + } + tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"} + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestSyntaxErrorLineNum(t *testing.T) { + testInput := "
<P>Foo<P>\n\n<P>
    Bar\n" + d := NewDecoder(strings.NewReader(testInput)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Error("Expected SyntaxError.") + } + if synerr.Line != 3 { + t.Error("SyntaxError didn't have correct line number.") + } +} + +func TestTrailingRawToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.RawToken(); err == nil; _, err = d.RawToken() { + } + if err != io.EOF { + t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err) + } +} + +func TestTrailingToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +func TestEntityInsideCDATA(t *testing.T) { + input := `` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +var characterTests = []struct { + in string + err string +}{ + {"\x12", "illegal character code U+0012"}, + {"\x0b", "illegal character code U+000B"}, + {"\xef\xbf\xbe", "illegal character code U+FFFE"}, + {"\r\n\x07", "illegal character code U+0007"}, + {"what's up", "expected attribute name in element"}, + {"&abc\x01;", "invalid character entity &abc (no semicolon)"}, + {"&\x01;", "invalid character entity & (no semicolon)"}, + {"&\xef\xbf\xbe;", "invalid character entity &\uFFFE;"}, + {"&hello;", "invalid character entity &hello;"}, +} + +func TestDisallowedCharacters(t *testing.T) { + + for i, tt := range characterTests { + d := NewDecoder(strings.NewReader(tt.in)) + var err error + + for err == nil { + _, err = d.Token() + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err) + } + if synerr.Msg != tt.err { + t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg) + } + } +} + +type procInstEncodingTest struct { + expect, got string +} + +var procInstTests = []struct { + input string + expect [2]string +}{ + {`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}}, + {`encoding="FOO" `, [2]string{"", "FOO"}}, +} + +func TestProcInstEncoding(t *testing.T) { + for _, test := range procInstTests { + if got := procInst("version", test.input); got != test.expect[0] { + t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0]) + } + if got := procInst("encoding", test.input); got != test.expect[1] { + t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1]) + } + } +} + +// Ensure that directives with comments include the complete +// text of any nested directives. 
+ +var directivesWithCommentsInput = ` +]> +]> + --> --> []> +` + +var directivesWithCommentsTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestDirectivesWithComments(t *testing.T) { + d := NewDecoder(strings.NewReader(directivesWithCommentsInput)) + + for i, want := range directivesWithCommentsTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +// Writer whose Write method always returns an error. +type errWriter struct{} + +func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") } + +func TestEscapeTextIOErrors(t *testing.T) { + expectErr := "unwritable" + err := EscapeText(errWriter{}, []byte{'A'}) + + if err == nil || err.Error() != expectErr { + t.Errorf("have %v, want %v", err, expectErr) + } +} + +func TestEscapeTextInvalidChar(t *testing.T) { + input := []byte("A \x00 terminated string.") + expected := "A \uFFFD terminated string." + + buff := new(bytes.Buffer) + if err := EscapeText(buff, input); err != nil { + t.Fatalf("have %v, want nil", err) + } + text := buff.String() + + if text != expected { + t.Errorf("have %v, want %v", text, expected) + } +} + +func TestIssue5880(t *testing.T) { + type T []byte + data, err := Marshal(T{192, 168, 0, 1}) + if err != nil { + t.Errorf("Marshal error: %v", err) + } + if !utf8.Valid(data) { + t.Errorf("Marshal generated invalid UTF-8: %x", data) + } +} diff --git a/vendor/golang.org/x/net/webdav/litmus_test_server.go b/vendor/golang.org/x/net/webdav/litmus_test_server.go new file mode 100644 index 00000000000..514db5dd19a --- /dev/null +++ b/vendor/golang.org/x/net/webdav/litmus_test_server.go @@ -0,0 +1,94 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program is a server for the WebDAV 'litmus' compliance test at +http://www.webdav.org/neon/litmus/ +To run the test: + +go run litmus_test_server.go + +and separately, from the downloaded litmus-xxx directory: + +make URL=http://localhost:9999/ check +*/ +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/webdav" +) + +var port = flag.Int("port", 9999, "server port") + +func main() { + flag.Parse() + log.SetFlags(0) + h := &webdav.Handler{ + FileSystem: webdav.NewMemFS(), + LockSystem: webdav.NewMemLS(), + Logger: func(r *http.Request, err error) { + litmus := r.Header.Get("X-Litmus") + if len(litmus) > 19 { + litmus = litmus[:16] + "..." + } + + switch r.Method { + case "COPY", "MOVE": + dst := "" + if u, err := url.Parse(r.Header.Get("Destination")); err == nil { + dst = u.Path + } + o := r.Header.Get("Overwrite") + log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err) + default: + log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err) + } + }, + } + + // The next line would normally be: + // http.Handle("/", h) + // but we wrap that HTTP handler h to cater for a special case. + // + // The propfind_invalid2 litmus test case expects an empty namespace prefix + // declaration to be an error. The FAQ in the webdav litmus test says: + // + // "What does the "propfind_invalid2" test check for?... 
+ // + // If a request was sent with an XML body which included an empty namespace + // prefix declaration (xmlns:ns1=""), then the server must reject that with + // a "400 Bad Request" response, as it is invalid according to the XML + // Namespace specification." + // + // On the other hand, the Go standard library's encoding/xml package + // accepts an empty xmlns namespace, as per the discussion at + // https://github.com/golang/go/issues/8068 + // + // Empty namespaces seem disallowed in the second (2006) edition of the XML + // standard, but allowed in a later edition. The grammar differs between + // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and + // http://www.w3.org/TR/REC-xml-names/#dt-prefix + // + // Thus, we assume that the propfind_invalid2 test is obsolete, and + // hard-code the 400 Bad Request response that the test expects. + http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" { + http.Error(w, "400 Bad Request", http.StatusBadRequest) + return + } + h.ServeHTTP(w, r) + })) + + addr := fmt.Sprintf(":%d", *port) + log.Printf("Serving %v", addr) + log.Fatal(http.ListenAndServe(addr, nil)) +} diff --git a/vendor/golang.org/x/net/webdav/lock.go b/vendor/golang.org/x/net/webdav/lock.go new file mode 100644 index 00000000000..344ac5ceaf1 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock.go @@ -0,0 +1,445 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "container/heap" + "errors" + "strconv" + "strings" + "sync" + "time" +) + +var ( + // ErrConfirmationFailed is returned by a LockSystem's Confirm method. + ErrConfirmationFailed = errors.New("webdav: confirmation failed") + // ErrForbidden is returned by a LockSystem's Unlock method. + ErrForbidden = errors.New("webdav: forbidden") + // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods. + ErrLocked = errors.New("webdav: locked") + // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods. + ErrNoSuchLock = errors.New("webdav: no such lock") +) + +// Condition can match a WebDAV resource, based on a token or ETag. +// Exactly one of Token and ETag should be non-empty. +type Condition struct { + Not bool + Token string + ETag string +} + +// LockSystem manages access to a collection of named resources. The elements +// in a lock name are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +type LockSystem interface { + // Confirm confirms that the caller can claim all of the locks specified by + // the given conditions, and that holding the union of all of those locks + // gives exclusive access to all of the named resources. Up to two resources + // can be named. Empty names are ignored. + // + // Exactly one of release and err will be non-nil. If release is non-nil, + // all of the requested locks are held until release is called. Calling + // release does not unlock the lock, in the WebDAV UNLOCK sense, but once + // Confirm has confirmed that a lock claim is valid, that lock cannot be + // Confirmed again until it has been released. + // + // If Confirm returns ErrConfirmationFailed then the Handler will continue + // to try any other set of locks presented (a WebDAV HTTP request can + // present more than one set of locks). 
If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error) + + // Create creates a lock with the given depth, duration, owner and root + // (name). The depth will either be negative (meaning infinite) or zero. + // + // If Create returns ErrLocked then the Handler will write a "423 Locked" + // HTTP status. If it returns any other non-nil error, the Handler will + // write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + // + // The token returned identifies the created lock. It should be an absolute + // URI as defined by RFC 3986, Section 4.3. In particular, it should not + // contain whitespace. + Create(now time.Time, details LockDetails) (token string, err error) + + // Refresh refreshes the lock with the given token. + // + // If Refresh returns ErrLocked then the Handler will write a "423 Locked" + // HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write + // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) + + // Unlock unlocks the lock with the given token. + // + // If Unlock returns ErrForbidden then the Handler will write a "403 + // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler + // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock + // then the Handler will write a "409 Conflict" HTTP Status. If it returns + // any other non-nil error, the Handler will write a "500 Internal Server + // Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for + // when to use each error. + Unlock(now time.Time, token string) error +} + +// LockDetails are a lock's metadata. +type LockDetails struct { + // Root is the root resource name being locked. For a zero-depth lock, the + // root is the only resource being locked. + Root string + // Duration is the lock timeout. A negative duration means infinite. + Duration time.Duration + // OwnerXML is the verbatim XML given in a LOCK HTTP request. + // + // TODO: does the "verbatim" nature play well with XML namespaces? + // Does the OwnerXML field need to have more structure? See + // https://codereview.appspot.com/175140043/#msg2 + OwnerXML string + // ZeroDepth is whether the lock has zero depth. If it does not have zero + // depth, it has infinite depth. + ZeroDepth bool +} + +// NewMemLS returns a new in-memory LockSystem. +func NewMemLS() LockSystem { + return &memLS{ + byName: make(map[string]*memLSNode), + byToken: make(map[string]*memLSNode), + gen: uint64(time.Now().Unix()), + } +} + +type memLS struct { + mu sync.Mutex + byName map[string]*memLSNode + byToken map[string]*memLSNode + gen uint64 + // byExpiry only contains those nodes whose LockDetails have a finite + // Duration and are yet to expire. 
+ byExpiry byExpiry +} + +func (m *memLS) nextToken() string { + m.gen++ + return strconv.FormatUint(m.gen, 10) +} + +func (m *memLS) collectExpiredNodes(now time.Time) { + for len(m.byExpiry) > 0 { + if now.Before(m.byExpiry[0].expiry) { + break + } + m.remove(m.byExpiry[0]) + } +} + +func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + var n0, n1 *memLSNode + if name0 != "" { + if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil { + return nil, ErrConfirmationFailed + } + } + if name1 != "" { + if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil { + return nil, ErrConfirmationFailed + } + } + + // Don't hold the same node twice. + if n1 == n0 { + n1 = nil + } + + if n0 != nil { + m.hold(n0) + } + if n1 != nil { + m.hold(n1) + } + return func() { + m.mu.Lock() + defer m.mu.Unlock() + if n1 != nil { + m.unhold(n1) + } + if n0 != nil { + m.unhold(n0) + } + }, nil +} + +// lookup returns the node n that locks the named resource, provided that n +// matches at least one of the given conditions and that lock isn't held by +// another party. Otherwise, it returns nil. +// +// n may be a parent of the named resource, if n is an infinite depth lock. +func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) { + // TODO: support Condition.Not and Condition.ETag. + for _, c := range conditions { + n = m.byToken[c.Token] + if n == nil || n.held { + continue + } + if name == n.details.Root { + return n + } + if n.details.ZeroDepth { + continue + } + if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") { + return n + } + } + return nil +} + +func (m *memLS) hold(n *memLSNode) { + if n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = true + if n.details.Duration >= 0 && n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func (m *memLS) unhold(n *memLSNode) { + if !n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = false + if n.details.Duration >= 0 { + heap.Push(&m.byExpiry, n) + } +} + +func (m *memLS) Create(now time.Time, details LockDetails) (string, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + details.Root = slashClean(details.Root) + + if !m.canCreate(details.Root, details.ZeroDepth) { + return "", ErrLocked + } + n := m.create(details.Root) + n.token = m.nextToken() + m.byToken[n.token] = n + n.details = details + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.token, nil +} + +func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return LockDetails{}, ErrNoSuchLock + } + if n.held { + return LockDetails{}, ErrLocked + } + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } + n.details.Duration = duration + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.details, nil +} + +func (m *memLS) Unlock(now time.Time, token string) error { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return ErrNoSuchLock + } + if n.held { + return ErrLocked + } + m.remove(n) + return nil +} + +func (m *memLS) canCreate(name string, zeroDepth bool) bool { + return walkToRoot(name, 
func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + return true + } + if first { + if n.token != "" { + // The target node is already locked. + return false + } + if !zeroDepth { + // The requested lock depth is infinite, and the fact that n exists + // (n != nil) means that a descendent of the target node is locked. + return false + } + } else if n.token != "" && !n.details.ZeroDepth { + // An ancestor of the target node is locked with infinite depth. + return false + } + return true + }) +} + +func (m *memLS) create(name string) (ret *memLSNode) { + walkToRoot(name, func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + n = &memLSNode{ + details: LockDetails{ + Root: name0, + }, + byExpiryIndex: -1, + } + m.byName[name0] = n + } + n.refCount++ + if first { + ret = n + } + return true + }) + return ret +} + +func (m *memLS) remove(n *memLSNode) { + delete(m.byToken, n.token) + n.token = "" + walkToRoot(n.details.Root, func(name0 string, first bool) bool { + x := m.byName[name0] + x.refCount-- + if x.refCount == 0 { + delete(m.byName, name0) + } + return true + }) + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func walkToRoot(name string, f func(name0 string, first bool) bool) bool { + for first := true; ; first = false { + if !f(name, first) { + return false + } + if name == "/" { + break + } + name = name[:strings.LastIndex(name, "/")] + if name == "" { + name = "/" + } + } + return true +} + +type memLSNode struct { + // details are the lock metadata. Even if this node's name is not explicitly locked, + // details.Root will still equal the node's name. + details LockDetails + // token is the unique identifier for this node's lock. An empty token means that + // this node is not explicitly locked. + token string + // refCount is the number of self-or-descendent nodes that are explicitly locked. + refCount int + // expiry is when this node's lock expires. + expiry time.Time + // byExpiryIndex is the index of this node in memLS.byExpiry. It is -1 + // if this node does not expire, or has expired. + byExpiryIndex int + // held is whether this node's lock is actively held by a Confirm call. + held bool +} + +type byExpiry []*memLSNode + +func (b *byExpiry) Len() int { + return len(*b) +} + +func (b *byExpiry) Less(i, j int) bool { + return (*b)[i].expiry.Before((*b)[j].expiry) +} + +func (b *byExpiry) Swap(i, j int) { + (*b)[i], (*b)[j] = (*b)[j], (*b)[i] + (*b)[i].byExpiryIndex = i + (*b)[j].byExpiryIndex = j +} + +func (b *byExpiry) Push(x interface{}) { + n := x.(*memLSNode) + n.byExpiryIndex = len(*b) + *b = append(*b, n) +} + +func (b *byExpiry) Pop() interface{} { + i := len(*b) - 1 + n := (*b)[i] + (*b)[i] = nil + n.byExpiryIndex = -1 + *b = (*b)[:i] + return n +} + +const infiniteTimeout = -1 + +// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is +// empty, an infiniteTimeout is returned. 
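Before the Timeout-header parser that follows, here is a minimal sketch of the lock lifecycle defined by the LockSystem interface and the in-memory memLS above: Create, Confirm/release, then Unlock. It assumes the exported golang.org/x/net/webdav API introduced by this change; the resource names and the negative Duration (meaning infinite, per LockDetails) are arbitrary illustration values.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/webdav"
)

func main() {
	ls := webdav.NewMemLS()
	now := time.Now()

	// Create an infinite-depth lock on /a that never expires.
	token, err := ls.Create(now, webdav.LockDetails{
		Root:     "/a",
		Duration: -1, // negative duration means infinite
	})
	if err != nil {
		panic(err)
	}

	// Confirm holds the lock while we operate on a descendent of /a.
	release, err := ls.Confirm(now, "/a/b", "", webdav.Condition{Token: token})
	if err != nil {
		panic(err)
	}
	release() // the claim is released, but the lock itself remains held

	// Unlock drops the lock for good.
	if err := ls.Unlock(now, token); err != nil {
		panic(err)
	}
	fmt.Println("released lock", token)
}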
+func parseTimeout(s string) (time.Duration, error) { + if s == "" { + return infiniteTimeout, nil + } + if i := strings.IndexByte(s, ','); i >= 0 { + s = s[:i] + } + s = strings.TrimSpace(s) + if s == "Infinite" { + return infiniteTimeout, nil + } + const pre = "Second-" + if !strings.HasPrefix(s, pre) { + return 0, errInvalidTimeout + } + s = s[len(pre):] + if s == "" || s[0] < '0' || '9' < s[0] { + return 0, errInvalidTimeout + } + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || 1<<32-1 < n { + return 0, errInvalidTimeout + } + return time.Duration(n) * time.Second, nil +} diff --git a/vendor/golang.org/x/net/webdav/lock_test.go b/vendor/golang.org/x/net/webdav/lock_test.go new file mode 100644 index 00000000000..116d6c0d7e4 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock_test.go @@ -0,0 +1,731 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "fmt" + "math/rand" + "path" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +func TestWalkToRoot(t *testing.T) { + testCases := []struct { + name string + want []string + }{{ + "/a/b/c/d", + []string{ + "/a/b/c/d", + "/a/b/c", + "/a/b", + "/a", + "/", + }, + }, { + "/a", + []string{ + "/a", + "/", + }, + }, { + "/", + []string{ + "/", + }, + }} + + for _, tc := range testCases { + var got []string + if !walkToRoot(tc.name, func(name0 string, first bool) bool { + if first != (len(got) == 0) { + t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got)) + return false + } + got = append(got, name0) + return true + }) { + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want) + } + } +} + +var lockTestDurations = []time.Duration{ + infiniteTimeout, // infiniteTimeout means to never expire. + 0, // A zero duration means to expire immediately. + 100 * time.Hour, // A very large duration will not expire in these tests. +} + +// lockTestNames are the names of a set of mutually compatible locks. For each +// name fragment: +// - _ means no explicit lock. +// - i means a infinite-depth lock, +// - z means a zero-depth lock, +var lockTestNames = []string{ + "/_/_/_/_/z", + "/_/_/i", + "/_/z", + "/_/z/i", + "/_/z/z", + "/_/z/_/i", + "/_/z/_/z", + "/i", + "/z", + "/z/_/i", + "/z/_/z", +} + +func lockTestZeroDepth(name string) bool { + switch name[len(name)-1] { + case 'i': + return false + case 'z': + return true + } + panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name)) +} + +func TestMemLSCanCreate(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + for _, name := range lockTestNames { + _, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + } + + wantCanCreate := func(name string, zeroDepth bool) bool { + for _, n := range lockTestNames { + switch { + case n == name: + // An existing lock has the same name as the proposed lock. + return false + case strings.HasPrefix(n, name): + // An existing lock would be a child of the proposed lock, + // which conflicts if the proposed lock has infinite depth. + if !zeroDepth { + return false + } + case strings.HasPrefix(name, n): + // An existing lock would be an ancestor of the proposed lock, + // which conflicts if the ancestor has infinite depth. 
+ if n[len(n)-1] == 'i' { + return false + } + } + } + return true + } + + var check func(int, string) + check = func(recursion int, name string) { + for _, zeroDepth := range []bool{false, true} { + got := m.canCreate(name, zeroDepth) + want := wantCanCreate(name, zeroDepth) + if got != want { + t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want) + } + } + if recursion == 6 { + return + } + if name != "/" { + name += "/" + } + for _, c := range "_iz" { + check(recursion+1, name+string(c)) + } + } + check(0, "/") +} + +func TestMemLSLookup(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + badToken := m.nextToken() + t.Logf("badToken=%q", badToken) + + for _, name := range lockTestNames { + token, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token) + } + + baseNames := append([]string{"/a", "/b/c"}, lockTestNames...) + for _, baseName := range baseNames { + for _, suffix := range []string{"", "/0", "/1/2/3"} { + name := baseName + suffix + + goodToken := "" + base := m.byName[baseName] + if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) { + goodToken = base.token + } + + for _, token := range []string{badToken, goodToken} { + if token == "" { + continue + } + + got := m.lookup(name, Condition{Token: token}) + want := base + if token == badToken { + want = nil + } + if got != want { + t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p", + name, token, token == badToken, got, want) + } + } + } + } +} + +func TestMemLSConfirm(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + alice, err := m.Create(now, LockDetails{ + Root: "/alice", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + tweedle, err := m.Create(now, LockDetails{ + Root: "/tweedle", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + + // Test a mismatch between name and condition. + _, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (mismatch): inconsistent state: %v", err) + } + + // Test two names (that fall under the same lock) in the one Confirm call. + release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (twins): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (twins): inconsistent state: %v", err) + } + release() + if err := m.consistent(); err != nil { + t.Fatalf("release (twins): inconsistent state: %v", err) + } + + // Test the same two names in overlapping Confirm / release calls. 
+ releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #0): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err) + } + + _, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err) + } + + releaseDee() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #2): inconsistent state: %v", err) + } + + releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #3): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err) + } + + // Test that you can't unlock a held lock. + err = m.Unlock(now, tweedle) + if err != ErrLocked { + t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err) + } + + releaseDum() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #5): inconsistent state: %v", err) + } + + err = m.Unlock(now, tweedle) + if err != nil { + t.Fatalf("Unlock (sequence #6): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err) + } +} + +func TestMemLSNonCanonicalRoot(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + token, err := m.Create(now, LockDetails{ + Root: "/foo/./bar//", + Duration: 1 * time.Second, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + if err := m.Unlock(now, token); err != nil { + t.Fatalf("Unlock: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock: inconsistent state: %v", err) + } +} + +func TestMemLSExpiry(t *testing.T) { + m := NewMemLS().(*memLS) + testCases := []string{ + "setNow 0", + "create /a.5", + "want /a.5", + "create /c.6", + "want /a.5 /c.6", + "create /a/b.7", + "want /a.5 /a/b.7 /c.6", + "setNow 4", + "want /a.5 /a/b.7 /c.6", + "setNow 5", + "want /a/b.7 /c.6", + "setNow 6", + "want /a/b.7", + "setNow 7", + "want ", + "setNow 8", + "want ", + "create /a.12", + "create /b.13", + "create /c.15", + "create /a/d.16", + "want /a.12 /a/d.16 /b.13 /c.15", + "refresh /a.14", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 12", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 13", + "want /a.14 /a/d.16 /c.15", + "setNow 14", + "want /a/d.16 /c.15", + "refresh /a/d.20", + "refresh /c.20", + "want /a/d.20 /c.20", + "setNow 20", + "want ", + } + + tokens := map[string]string{} + zTime := time.Unix(0, 0) + now := zTime + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create", "refresh": + parts := strings.Split(arg, ".") + if len(parts) != 2 { + t.Fatalf("test case #%d %q: invalid create", i, tc) + } + root := parts[0] + d, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now) + + switch op { + case "create": + token, err := m.Create(now, LockDetails{ + Root: root, + Duration: dur, + 
ZeroDepth: true, + }) + if err != nil { + t.Fatalf("test case #%d %q: Create: %v", i, tc, err) + } + tokens[root] = token + + case "refresh": + token := tokens[root] + if token == "" { + t.Fatalf("test case #%d %q: no token for %q", i, tc, root) + } + got, err := m.Refresh(now, token, dur) + if err != nil { + t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err) + } + want := LockDetails{ + Root: root, + Duration: dur, + ZeroDepth: true, + } + if got != want { + t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want) + } + } + + case "setNow": + d, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + now = time.Unix(0, 0).Add(time.Duration(d) * time.Second) + + case "want": + m.mu.Lock() + m.collectExpiredNodes(now) + got := make([]string, 0, len(m.byToken)) + for _, n := range m.byToken { + got = append(got, fmt.Sprintf("%s.%d", + n.details.Root, n.expiry.Sub(zTime)/time.Second)) + } + m.mu.Unlock() + sort.Strings(got) + want := []string{} + if arg != "" { + want = strings.Split(arg, " ") + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want) + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err) + } + } +} + +func TestMemLS(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + rng := rand.New(rand.NewSource(0)) + tokens := map[string]string{} + nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0 + const N = 2000 + + for i := 0; i < N; i++ { + name := lockTestNames[rng.Intn(len(lockTestNames))] + duration := lockTestDurations[rng.Intn(len(lockTestDurations))] + confirmed, unlocked := false, false + + // If the name was already locked, we randomly confirm/release, refresh + // or unlock it. Otherwise, we create a lock. + token := tokens[name] + if token != "" { + switch rng.Intn(3) { + case 0: + confirmed = true + nConfirm++ + release, err := m.Confirm(now, name, "", Condition{Token: token}) + if err != nil { + t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err) + } + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + release() + + case 1: + nRefresh++ + if _, err := m.Refresh(now, token, duration); err != nil { + t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err) + } + + case 2: + unlocked = true + nUnlock++ + if err := m.Unlock(now, token); err != nil { + t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err) + } + } + + } else { + nCreate++ + var err error + token, err = m.Create(now, LockDetails{ + Root: name, + Duration: duration, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("iteration #%d: Create %q: %v", i, name, err) + } + } + + if !confirmed { + if duration == 0 || unlocked { + // A zero-duration lock should expire immediately and is + // effectively equivalent to being unlocked. 
+ tokens[name] = "" + } else { + tokens[name] = token + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + } + + if nConfirm < N/10 { + t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10) + } + if nCreate < N/10 { + t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10) + } + if nRefresh < N/10 { + t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10) + } + if nUnlock < N/10 { + t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10) + } +} + +func (m *memLS) consistent() error { + m.mu.Lock() + defer m.mu.Unlock() + + // If m.byName is non-empty, then it must contain an entry for the root "/", + // and its refCount should equal the number of locked nodes. + if len(m.byName) > 0 { + n := m.byName["/"] + if n == nil { + return fmt.Errorf(`non-empty m.byName does not contain the root "/"`) + } + if n.refCount != len(m.byToken) { + return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken)) + } + } + + for name, n := range m.byName { + // The map keys should be consistent with the node's copy of the key. + if n.details.Root != name { + return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name) + } + + // A name must be clean, and start with a "/". + if len(name) == 0 || name[0] != '/' { + return fmt.Errorf(`node name %q does not start with "/"`, name) + } + if name != path.Clean(name) { + return fmt.Errorf(`node name %q is not clean`, name) + } + + // A node's refCount should be positive. + if n.refCount <= 0 { + return fmt.Errorf("non-positive refCount for node at name %q", name) + } + + // A node's refCount should be the number of self-or-descendents that + // are locked (i.e. have a non-empty token). + var list []string + for name0, n0 := range m.byName { + // All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z', + // so strings.HasPrefix is equivalent to self-or-descendent name match. + // We don't have to worry about "/foo/bar" being a false positive match + // for "/foo/b". + if strings.HasPrefix(name0, name) && n0.token != "" { + list = append(list, name0) + } + } + if n.refCount != len(list) { + sort.Strings(list) + return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)", + name, n.refCount, list, len(list)) + } + + // A node n is in m.byToken if it has a non-empty token. + if n.token != "" { + if _, ok := m.byToken[n.token]; !ok { + return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token) + } + } + + // A node n is in m.byExpiry if it has a non-negative byExpiryIndex. + if n.byExpiryIndex >= 0 { + if n.byExpiryIndex >= len(m.byExpiry) { + return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry)) + } + if n != m.byExpiry[n.byExpiryIndex] { + return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex) + } + } + } + + for token, n := range m.byToken { + // The map keys should be consistent with the node's copy of the key. + if n.token != token { + return fmt.Errorf("node token %q != byToken map key %q", n.token, token) + } + + // Every node in m.byToken is in m.byName. 
+ if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root) + } + } + + for i, n := range m.byExpiry { + // The slice indices should be consistent with the node's copy of the index. + if n.byExpiryIndex != i { + return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i) + } + + // Every node in m.byExpiry is in m.byName. + if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root) + } + + // No node in m.byExpiry should be held. + if n.held { + return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root) + } + } + return nil +} + +func TestParseTimeout(t *testing.T) { + testCases := []struct { + s string + want time.Duration + wantErr error + }{{ + "", + infiniteTimeout, + nil, + }, { + "Infinite", + infiniteTimeout, + nil, + }, { + "Infinitesimal", + 0, + errInvalidTimeout, + }, { + "infinite", + 0, + errInvalidTimeout, + }, { + "Second-0", + 0 * time.Second, + nil, + }, { + "Second-123", + 123 * time.Second, + nil, + }, { + " Second-456 ", + 456 * time.Second, + nil, + }, { + "Second-4100000000", + 4100000000 * time.Second, + nil, + }, { + "junk", + 0, + errInvalidTimeout, + }, { + "Second-", + 0, + errInvalidTimeout, + }, { + "Second--1", + 0, + errInvalidTimeout, + }, { + "Second--123", + 0, + errInvalidTimeout, + }, { + "Second-+123", + 0, + errInvalidTimeout, + }, { + "Second-0x123", + 0, + errInvalidTimeout, + }, { + "second-123", + 0, + errInvalidTimeout, + }, { + "Second-4294967295", + 4294967295 * time.Second, + nil, + }, { + // Section 10.7 says that "The timeout value for TimeType "Second" + // must not be greater than 2^32-1." + "Second-4294967296", + 0, + errInvalidTimeout, + }, { + // This test case comes from section 9.10.9 of the spec. It says, + // + // "In this request, the client has specified that it desires an + // infinite-length lock, if available, otherwise a timeout of 4.1 + // billion seconds, if available." + // + // The Go WebDAV package always supports infinite length locks, + // and ignores the fallback after the comma. + "Infinite, Second-4100000000", + infiniteTimeout, + nil, + }} + + for _, tc := range testCases { + got, gotErr := parseTimeout(tc.s) + if got != tc.want || gotErr != tc.wantErr { + t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr) + } + } +} diff --git a/vendor/golang.org/x/net/webdav/prop.go b/vendor/golang.org/x/net/webdav/prop.go new file mode 100644 index 00000000000..14594663787 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/prop.go @@ -0,0 +1,395 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "fmt" + "io" + "mime" + "net/http" + "os" + "path/filepath" + "strconv" +) + +// Proppatch describes a property update instruction as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH +type Proppatch struct { + // Remove specifies whether this patch removes properties. If it does not + // remove them, it sets them. + Remove bool + // Props contains the properties to be set or removed. + Props []Property +} + +// Propstat describes a XML propstat element as defined in RFC 4918. 
+// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +type Propstat struct { + // Props contains the properties for which Status applies. + Props []Property + + // Status defines the HTTP status code of the properties in Prop. + // Allowed values include, but are not limited to the WebDAV status + // code extensions for HTTP/1.1. + // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 + Status int + + // XMLError contains the XML representation of the optional error element. + // XML content within this field must not rely on any predefined + // namespace declarations or prefixes. If empty, the XML error element + // is omitted. + XMLError string + + // ResponseDescription contains the contents of the optional + // responsedescription field. If empty, the XML element is omitted. + ResponseDescription string +} + +// makePropstats returns a slice containing those of x and y whose Props slice +// is non-empty. If both are empty, it returns a slice containing an otherwise +// zero Propstat whose HTTP status code is 200 OK. +func makePropstats(x, y Propstat) []Propstat { + pstats := make([]Propstat, 0, 2) + if len(x.Props) != 0 { + pstats = append(pstats, x) + } + if len(y.Props) != 0 { + pstats = append(pstats, y) + } + if len(pstats) == 0 { + pstats = append(pstats, Propstat{ + Status: http.StatusOK, + }) + } + return pstats +} + +// DeadPropsHolder holds the dead properties of a resource. +// +// Dead properties are those properties that are explicitly defined. In +// comparison, live properties, such as DAV:getcontentlength, are implicitly +// defined by the underlying resource, and cannot be explicitly overridden or +// removed. See the Terminology section of +// http://www.webdav.org/specs/rfc4918.html#rfc.section.3 +// +// There is a whitelist of the names of live properties. This package handles +// all live properties, and will only pass non-whitelisted names to the Patch +// method of DeadPropsHolder implementations. +type DeadPropsHolder interface { + // DeadProps returns a copy of the dead properties held. + DeadProps() (map[xml.Name]Property, error) + + // Patch patches the dead properties held. + // + // Patching is atomic; either all or no patches succeed. It returns (nil, + // non-nil) if an internal server error occurred, otherwise the Propstats + // collectively contain one Property for each proposed patch Property. If + // all patches succeed, Patch returns a slice of length one and a Propstat + // element with a 200 OK HTTP status code. If none succeed, for reasons + // other than an internal server error, no Propstat has status 200 OK. + // + // For more details on when various HTTP status codes apply, see + // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status + Patch([]Proppatch) ([]Propstat, error) +} + +// liveProps contains all supported, protected DAV: properties. +var liveProps = map[xml.Name]struct { + // findFn implements the propfind function of this property. If nil, + // it indicates a hidden property. + findFn func(FileSystem, LockSystem, string, os.FileInfo) (string, error) + // dir is true if the property applies to directories. 
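+ // Properties with dir set to false are only reported for non-directory resources.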
+ dir bool +}{ + xml.Name{Space: "DAV:", Local: "resourcetype"}: { + findFn: findResourceType, + dir: true, + }, + xml.Name{Space: "DAV:", Local: "displayname"}: { + findFn: findDisplayName, + dir: true, + }, + xml.Name{Space: "DAV:", Local: "getcontentlength"}: { + findFn: findContentLength, + dir: false, + }, + xml.Name{Space: "DAV:", Local: "getlastmodified"}: { + findFn: findLastModified, + // http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified + // suggests that getlastmodified should only apply to GETable + // resources, and this package does not support GET on directories. + // + // Nonetheless, some WebDAV clients expect child directories to be + // sortable by getlastmodified date, so this value is true, not false. + // See golang.org/issue/15334. + dir: true, + }, + xml.Name{Space: "DAV:", Local: "creationdate"}: { + findFn: nil, + dir: false, + }, + xml.Name{Space: "DAV:", Local: "getcontentlanguage"}: { + findFn: nil, + dir: false, + }, + xml.Name{Space: "DAV:", Local: "getcontenttype"}: { + findFn: findContentType, + dir: false, + }, + xml.Name{Space: "DAV:", Local: "getetag"}: { + findFn: findETag, + // findETag implements ETag as the concatenated hex values of a file's + // modification time and size. This is not a reliable synchronization + // mechanism for directories, so we do not advertise getetag for DAV + // collections. + dir: false, + }, + + // TODO: The lockdiscovery property requires LockSystem to list the + // active locks on a resource. + xml.Name{Space: "DAV:", Local: "lockdiscovery"}: {}, + xml.Name{Space: "DAV:", Local: "supportedlock"}: { + findFn: findSupportedLock, + dir: true, + }, +} + +// TODO(nigeltao) merge props and allprop? + +// Props returns the status of the properties named pnames for resource name. +// +// Each Propstat has a unique status and each property name will only be part +// of one Propstat element. +func props(fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) { + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pstatOK := Propstat{Status: http.StatusOK} + pstatNotFound := Propstat{Status: http.StatusNotFound} + for _, pn := range pnames { + // If this file has dead properties, check if they contain pn. + if dp, ok := deadProps[pn]; ok { + pstatOK.Props = append(pstatOK.Props, dp) + continue + } + // Otherwise, it must either be a live property or we don't know it. + if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) { + innerXML, err := prop.findFn(fs, ls, name, fi) + if err != nil { + return nil, err + } + pstatOK.Props = append(pstatOK.Props, Property{ + XMLName: pn, + InnerXML: []byte(innerXML), + }) + } else { + pstatNotFound.Props = append(pstatNotFound.Props, Property{ + XMLName: pn, + }) + } + } + return makePropstats(pstatOK, pstatNotFound), nil +} + +// Propnames returns the property names defined for resource name. 
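+// The result covers the live properties applicable to the resource (file or
+// directory) plus the names of any dead properties the file holds.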
+func propnames(fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) { + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps)) + for pn, prop := range liveProps { + if prop.findFn != nil && (prop.dir || !isDir) { + pnames = append(pnames, pn) + } + } + for pn := range deadProps { + pnames = append(pnames, pn) + } + return pnames, nil +} + +// Allprop returns the properties defined for resource name and the properties +// named in include. +// +// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined +// within the RFC plus dead properties. Other live properties should only be +// returned if they are named in 'include'. +// +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND +func allprop(fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) { + pnames, err := propnames(fs, ls, name) + if err != nil { + return nil, err + } + // Add names from include if they are not already covered in pnames. + nameset := make(map[xml.Name]bool) + for _, pn := range pnames { + nameset[pn] = true + } + for _, pn := range include { + if !nameset[pn] { + pnames = append(pnames, pn) + } + } + return props(fs, ls, name, pnames) +} + +// Patch patches the properties of resource name. The return values are +// constrained in the same manner as DeadPropsHolder.Patch. +func patch(fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) { + conflict := false +loop: + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + conflict = true + break loop + } + } + } + if conflict { + pstatForbidden := Propstat{ + Status: http.StatusForbidden, + XMLError: ``, + } + pstatFailedDep := Propstat{ + Status: StatusFailedDependency, + } + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName}) + } else { + pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName}) + } + } + } + return makePropstats(pstatForbidden, pstatFailedDep), nil + } + + f, err := fs.OpenFile(name, os.O_RDWR, 0) + if err != nil { + return nil, err + } + defer f.Close() + if dph, ok := f.(DeadPropsHolder); ok { + ret, err := dph.Patch(patches) + if err != nil { + return nil, err + } + // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that + // "The contents of the prop XML element must only list the names of + // properties to which the result in the status element applies." + for _, pstat := range ret { + for i, p := range pstat.Props { + pstat.Props[i] = Property{XMLName: p.XMLName} + } + } + return ret, nil + } + // The file doesn't implement the optional DeadPropsHolder interface, so + // all patches are forbidden. 
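+ // Report every proposed property back under a single 403 (Forbidden) Propstat.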
+ pstat := Propstat{Status: http.StatusForbidden} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + } + } + return []Propstat{pstat}, nil +} + +func findResourceType(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if fi.IsDir() { + return ``, nil + } + return "", nil +} + +func findDisplayName(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if slashClean(name) == "/" { + // Hide the real name of a possibly prefixed root directory. + return "", nil + } + return fi.Name(), nil +} + +func findContentLength(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return strconv.FormatInt(fi.Size(), 10), nil +} + +func findLastModified(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return fi.ModTime().Format(http.TimeFormat), nil +} + +func findContentType(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return "", err + } + defer f.Close() + // This implementation is based on serveContent's code in the standard net/http package. + ctype := mime.TypeByExtension(filepath.Ext(name)) + if ctype != "" { + return ctype, nil + } + // Read a chunk to decide between utf-8 text and binary. + var buf [512]byte + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return "", err + } + ctype = http.DetectContentType(buf[:n]) + // Rewind file. + _, err = f.Seek(0, os.SEEK_SET) + return ctype, err +} + +func findETag(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + // The Apache http 2.4 web server by default concatenates the + // modification time and size of a file. We replicate the heuristic + // with nanosecond granularity. + return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil +} + +func findSupportedLock(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return `` + + `` + + `` + + `` + + ``, nil +} diff --git a/vendor/golang.org/x/net/webdav/prop_test.go b/vendor/golang.org/x/net/webdav/prop_test.go new file mode 100644 index 00000000000..0834dc9f148 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/prop_test.go @@ -0,0 +1,610 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "fmt" + "net/http" + "os" + "reflect" + "sort" + "testing" +) + +func TestMemPS(t *testing.T) { + // calcProps calculates the getlastmodified and getetag DAV: property + // values in pstats for resource name in file-system fs. 
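+ // These values depend on the test file's actual modification time, so the
+ // expected Propstats below leave them nil and have them filled in here.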
+ calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error { + fi, err := fs.Stat(name) + if err != nil { + return err + } + for _, pst := range pstats { + for i, p := range pst.Props { + switch p.XMLName { + case xml.Name{Space: "DAV:", Local: "getlastmodified"}: + p.InnerXML = []byte(fi.ModTime().Format(http.TimeFormat)) + pst.Props[i] = p + case xml.Name{Space: "DAV:", Local: "getetag"}: + if fi.IsDir() { + continue + } + etag, err := findETag(fs, ls, name, fi) + if err != nil { + return err + } + p.InnerXML = []byte(etag) + pst.Props[i] = p + } + } + } + return nil + } + + const ( + lockEntry = `` + + `` + + `` + + `` + + `` + statForbiddenError = `` + ) + + type propOp struct { + op string + name string + pnames []xml.Name + patches []Proppatch + wantPnames []xml.Name + wantPropstats []Propstat + } + + testCases := []struct { + desc string + noDeadProps bool + buildfs []string + propOp []propOp + }{{ + desc: "propname", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propname", + name: "/dir", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "DAV:", Local: "getlastmodified"}, + }, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + }, + }}, + }, { + desc: "allprop dir and file", + buildfs: []string{"mkdir /dir", "write /file foobarbaz"}, + propOp: []propOp{{ + op: "allprop", + name: "/dir", + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(``), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("dir"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}, + }}, + }, { + op: "allprop", + name: "/file", + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("file"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, + InnerXML: []byte("9"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, + InnerXML: []byte("text/plain; charset=utf-8"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. 
+ }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}, + }}, + }, { + op: "allprop", + name: "/file", + pnames: []xml.Name{ + {"DAV:", "resourcetype"}, + {"foo", "bar"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("file"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, + InnerXML: []byte("9"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, + InnerXML: []byte("text/plain; charset=utf-8"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}}, { + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}}, + }, + }}, + }, { + desc: "propfind DAV:resourcetype", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "resourcetype"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(``), + }}, + }}, + }, { + op: "propfind", + name: "/file", + pnames: []xml.Name{{"DAV:", "resourcetype"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }}, + }}, + }}, + }, { + desc: "propfind unsupported DAV properties", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "getcontentlanguage"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "creationdate"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "creationdate"}, + }}, + }}, + }}, + }, { + desc: "propfind getetag for files but not for directories", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "getetag"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }, { + op: "propfind", + name: "/file", + pnames: []xml.Name{{"DAV:", "getetag"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. 
+ }}, + }}, + }}, + }, { + desc: "proppatch property on no-dead-properties file system", + buildfs: []string{"mkdir /dir"}, + noDeadProps: true, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }}, + }, { + desc: "proppatch dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + }}, + }, { + desc: "proppatch dead property with failed dependency", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }, { + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("xxx"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + }}, + }, { + Status: StatusFailedDependency, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "proppatch remove dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: 
[]Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }, { + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }}, + }, { + desc: "propname with dead property", + buildfs: []string{"touch /file"}, + propOp: []propOp{{ + op: "proppatch", + name: "/file", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "foo", Local: "bar"}, + }, + }}, + }, { + desc: "proppatch remove unknown dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "bad: propfind unknown property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"foo:", "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo:", Local: "bar"}, + }}, + }}, + }}, + }} + + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + if tc.noDeadProps { + fs = noDeadPropsFS{fs} + } + ls := NewMemLS() + for _, op := range tc.propOp { + desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name) + if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil { + t.Fatalf("%s: calcProps: %v", desc, err) + } + + // Call property system. + var propstats []Propstat + switch op.op { + case "propname": + pnames, err := propnames(fs, ls, op.name) + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + sort.Sort(byXMLName(pnames)) + sort.Sort(byXMLName(op.wantPnames)) + if !reflect.DeepEqual(pnames, op.wantPnames) { + t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames) + } + continue + case "allprop": + propstats, err = allprop(fs, ls, op.name, op.pnames) + case "propfind": + propstats, err = props(fs, ls, op.name, op.pnames) + case "proppatch": + propstats, err = patch(fs, ls, op.name, op.patches) + default: + t.Fatalf("%s: %s not implemented", desc, op.op) + } + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + // Compare return values from allprop, propfind or proppatch. 
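+ // Sort both got and want before comparing: property names come from map
+ // iteration, so their order is not deterministic.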
+ for _, pst := range propstats { + sort.Sort(byPropname(pst.Props)) + } + for _, pst := range op.wantPropstats { + sort.Sort(byPropname(pst.Props)) + } + sort.Sort(byStatus(propstats)) + sort.Sort(byStatus(op.wantPropstats)) + if !reflect.DeepEqual(propstats, op.wantPropstats) { + t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats) + } + } + } +} + +func cmpXMLName(a, b xml.Name) bool { + if a.Space != b.Space { + return a.Space < b.Space + } + return a.Local < b.Local +} + +type byXMLName []xml.Name + +func (b byXMLName) Len() int { return len(b) } +func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) } + +type byPropname []Property + +func (b byPropname) Len() int { return len(b) } +func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) } + +type byStatus []Propstat + +func (b byStatus) Len() int { return len(b) } +func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status } + +type noDeadPropsFS struct { + FileSystem +} + +func (fs noDeadPropsFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, err := fs.FileSystem.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + return noDeadPropsFile{f}, nil +} + +// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods +// provided by the underlying File implementation. +type noDeadPropsFile struct { + f File +} + +func (f noDeadPropsFile) Close() error { return f.f.Close() } +func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) } +func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) } +func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) } +func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() } +func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) } diff --git a/vendor/golang.org/x/net/webdav/webdav.go b/vendor/golang.org/x/net/webdav/webdav.go new file mode 100644 index 00000000000..4ce09728bc5 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav.go @@ -0,0 +1,689 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package webdav provides a WebDAV server implementation. +package webdav // import "golang.org/x/net/webdav" + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" +) + +type Handler struct { + // Prefix is the URL path prefix to strip from WebDAV resource paths. + Prefix string + // FileSystem is the virtual file system. + FileSystem FileSystem + // LockSystem is the lock management system. + LockSystem LockSystem + // Logger is an optional error logger. If non-nil, it will be called + // for all HTTP requests. 
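+ // The error may be nil for requests that were handled successfully.
+ // As a hypothetical example, a minimal logger could look like:
+ //
+ //	Logger: func(r *http.Request, err error) {
+ //		if err != nil {
+ //			log.Printf("webdav: %s %s: %v", r.Method, r.URL.Path, err)
+ //		}
+ //	},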
+ Logger func(*http.Request, error) +} + +func (h *Handler) stripPrefix(p string) (string, int, error) { + if h.Prefix == "" { + return p, http.StatusOK, nil + } + if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) { + return r, http.StatusOK, nil + } + return p, http.StatusNotFound, errPrefixMismatch +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + status, err := http.StatusBadRequest, errUnsupportedMethod + if h.FileSystem == nil { + status, err = http.StatusInternalServerError, errNoFileSystem + } else if h.LockSystem == nil { + status, err = http.StatusInternalServerError, errNoLockSystem + } else { + switch r.Method { + case "OPTIONS": + status, err = h.handleOptions(w, r) + case "GET", "HEAD", "POST": + status, err = h.handleGetHeadPost(w, r) + case "DELETE": + status, err = h.handleDelete(w, r) + case "PUT": + status, err = h.handlePut(w, r) + case "MKCOL": + status, err = h.handleMkcol(w, r) + case "COPY", "MOVE": + status, err = h.handleCopyMove(w, r) + case "LOCK": + status, err = h.handleLock(w, r) + case "UNLOCK": + status, err = h.handleUnlock(w, r) + case "PROPFIND": + status, err = h.handlePropfind(w, r) + case "PROPPATCH": + status, err = h.handleProppatch(w, r) + } + } + + if status != 0 { + w.WriteHeader(status) + if status != http.StatusNoContent { + w.Write([]byte(StatusText(status))) + } + } + if h.Logger != nil { + h.Logger(r, err) + } +} + +func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) { + token, err = h.LockSystem.Create(now, LockDetails{ + Root: root, + Duration: infiniteTimeout, + ZeroDepth: true, + }) + if err != nil { + if err == ErrLocked { + return "", StatusLocked, err + } + return "", http.StatusInternalServerError, err + } + return token, 0, nil +} + +func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) { + hdr := r.Header.Get("If") + if hdr == "" { + // An empty If header means that the client hasn't previously created locks. + // Even if this client doesn't care about locks, we still need to check that + // the resources aren't locked by another client, so we create temporary + // locks that would conflict with another client's locks. These temporary + // locks are unlocked at the end of the HTTP request. + now, srcToken, dstToken := time.Now(), "", "" + if src != "" { + srcToken, status, err = h.lock(now, src) + if err != nil { + return nil, status, err + } + } + if dst != "" { + dstToken, status, err = h.lock(now, dst) + if err != nil { + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + return nil, status, err + } + } + + return func() { + if dstToken != "" { + h.LockSystem.Unlock(now, dstToken) + } + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + }, 0, nil + } + + ih, ok := parseIfHeader(hdr) + if !ok { + return nil, http.StatusBadRequest, errInvalidIfHeader + } + // ih is a disjunction (OR) of ifLists, so any ifList will do. + for _, l := range ih.lists { + lsrc := l.resourceTag + if lsrc == "" { + lsrc = src + } else { + u, err := url.Parse(lsrc) + if err != nil { + continue + } + if u.Host != r.Host { + continue + } + lsrc, status, err = h.stripPrefix(u.Path) + if err != nil { + return nil, status, err + } + } + release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...) 
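+ // ErrConfirmationFailed only means this particular state list did not match;
+ // keep trying the remaining lists in the disjunction.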
+ if err == ErrConfirmationFailed { + continue + } + if err != nil { + return nil, http.StatusInternalServerError, err + } + return release, 0, nil + } + // Section 10.4.1 says that "If this header is evaluated and all state lists + // fail, then the request must fail with a 412 (Precondition Failed) status." + // We follow the spec even though the cond_put_corrupt_token test case from + // the litmus test warns on seeing a 412 instead of a 423 (Locked). + return nil, http.StatusPreconditionFailed, ErrLocked +} + +func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + allow := "OPTIONS, LOCK, PUT, MKCOL" + if fi, err := h.FileSystem.Stat(reqPath); err == nil { + if fi.IsDir() { + allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND" + } else { + allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT" + } + } + w.Header().Set("Allow", allow) + // http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes + w.Header().Set("DAV", "1, 2") + // http://msdn.microsoft.com/en-au/library/cc250217.aspx + w.Header().Set("MS-Author-Via", "DAV") + return 0, nil +} + +func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + // TODO: check locks for read-only access?? + f, err := h.FileSystem.OpenFile(reqPath, os.O_RDONLY, 0) + if err != nil { + return http.StatusNotFound, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return http.StatusNotFound, err + } + if fi.IsDir() { + return http.StatusMethodNotAllowed, nil + } + etag, err := findETag(h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + // Let ServeContent determine the Content-Type header. + http.ServeContent(w, r, reqPath, fi.ModTime(), f) + return 0, nil +} + +func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + // TODO: return MultiStatus where appropriate. + + // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll + // returns nil (no error)." WebDAV semantics are that it should return a + // "404 Not Found". We therefore have to Stat before we RemoveAll. + if _, err := h.FileSystem.Stat(reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + if err := h.FileSystem.RemoveAll(reqPath); err != nil { + return http.StatusMethodNotAllowed, err + } + return http.StatusNoContent, nil +} + +func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz' + // comments in http.checkEtag. 
+ + f, err := h.FileSystem.OpenFile(reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return http.StatusNotFound, err + } + _, copyErr := io.Copy(f, r.Body) + fi, statErr := f.Stat() + closeErr := f.Close() + // TODO(rost): Returning 405 Method Not Allowed might not be appropriate. + if copyErr != nil { + return http.StatusMethodNotAllowed, copyErr + } + if statErr != nil { + return http.StatusMethodNotAllowed, statErr + } + if closeErr != nil { + return http.StatusMethodNotAllowed, closeErr + } + etag, err := findETag(h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + return http.StatusCreated, nil +} + +func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + if r.ContentLength > 0 { + return http.StatusUnsupportedMediaType, nil + } + if err := h.FileSystem.Mkdir(reqPath, 0777); err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusMethodNotAllowed, err + } + return http.StatusCreated, nil +} + +func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) { + hdr := r.Header.Get("Destination") + if hdr == "" { + return http.StatusBadRequest, errInvalidDestination + } + u, err := url.Parse(hdr) + if err != nil { + return http.StatusBadRequest, errInvalidDestination + } + if u.Host != r.Host { + return http.StatusBadGateway, errInvalidDestination + } + + src, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + + dst, status, err := h.stripPrefix(u.Path) + if err != nil { + return status, err + } + + if dst == "" { + return http.StatusBadGateway, errInvalidDestination + } + if dst == src { + return http.StatusForbidden, errDestinationEqualsSource + } + + if r.Method == "COPY" { + // Section 7.5.1 says that a COPY only needs to lock the destination, + // not both destination and source. Strictly speaking, this is racy, + // even though a COPY doesn't modify the source, if a concurrent + // operation modifies the source. However, the litmus test explicitly + // checks that COPYing a locked-by-another source is OK. + release, status, err := h.confirmLocks(r, "", dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.8.3 says that "The COPY method on a collection without a Depth + // header must act as if a Depth header with value "infinity" was included". + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.8.3 says that "A client may submit a Depth header on a + // COPY on a collection with a value of "0" or "infinity"." + return http.StatusBadRequest, errInvalidDepth + } + } + return copyFiles(h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0) + } + + release, status, err := h.confirmLocks(r, src, dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.9.2 says that "The MOVE method on a collection must act as if + // a "Depth: infinity" header was used on it. A client must not submit a + // Depth header on a MOVE on a collection with any value but "infinity"." 
+ if hdr := r.Header.Get("Depth"); hdr != "" { + if parseDepth(hdr) != infiniteDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + return moveFiles(h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T") +} + +func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) { + duration, err := parseTimeout(r.Header.Get("Timeout")) + if err != nil { + return http.StatusBadRequest, err + } + li, status, err := readLockInfo(r.Body) + if err != nil { + return status, err + } + + token, ld, now, created := "", LockDetails{}, time.Now(), false + if li == (lockInfo{}) { + // An empty lockInfo means to refresh the lock. + ih, ok := parseIfHeader(r.Header.Get("If")) + if !ok { + return http.StatusBadRequest, errInvalidIfHeader + } + if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 { + token = ih.lists[0].conditions[0].Token + } + if token == "" { + return http.StatusBadRequest, errInvalidLockToken + } + ld, err = h.LockSystem.Refresh(now, token, duration) + if err != nil { + if err == ErrNoSuchLock { + return http.StatusPreconditionFailed, err + } + return http.StatusInternalServerError, err + } + + } else { + // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request, + // then the request MUST act as if a "Depth:infinity" had been submitted." + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.10.3 says that "Values other than 0 or infinity must not be + // used with the Depth header on a LOCK method". + return http.StatusBadRequest, errInvalidDepth + } + } + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ld = LockDetails{ + Root: reqPath, + Duration: duration, + OwnerXML: li.Owner.InnerXML, + ZeroDepth: depth == 0, + } + token, err = h.LockSystem.Create(now, ld) + if err != nil { + if err == ErrLocked { + return StatusLocked, err + } + return http.StatusInternalServerError, err + } + defer func() { + if retErr != nil { + h.LockSystem.Unlock(now, token) + } + }() + + // Create the resource if it didn't previously exist. + if _, err := h.FileSystem.Stat(reqPath); err != nil { + f, err := h.FileSystem.OpenFile(reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + // TODO: detect missing intermediate dirs and return http.StatusConflict? + return http.StatusInternalServerError, err + } + f.Close() + created = true + } + + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We add angle brackets. + w.Header().Set("Lock-Token", "<"+token+">") + } + + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + if created { + // This is "w.WriteHeader(http.StatusCreated)" and not "return + // http.StatusCreated, nil" because we write our own (XML) response to w + // and Handler.ServeHTTP would otherwise write "Created". + w.WriteHeader(http.StatusCreated) + } + writeLockInfo(w, token, ld) + return 0, nil +} + +func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) { + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We strip its angle brackets. 
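+ // For example, a header of the form "Lock-Token: <urn:uuid:...>" yields the
+ // bare "urn:uuid:..." token that is passed to the LockSystem.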
+ t := r.Header.Get("Lock-Token") + if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' { + return http.StatusBadRequest, errInvalidLockToken + } + t = t[1 : len(t)-1] + + switch err = h.LockSystem.Unlock(time.Now(), t); err { + case nil: + return http.StatusNoContent, err + case ErrForbidden: + return http.StatusForbidden, err + case ErrLocked: + return StatusLocked, err + case ErrNoSuchLock: + return http.StatusConflict, err + default: + return http.StatusInternalServerError, err + } +} + +func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + fi, err := h.FileSystem.Stat(reqPath) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth == invalidDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + pf, status, err := readPropfind(r.Body) + if err != nil { + return status, err + } + + mw := multistatusWriter{w: w} + + walkFn := func(reqPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + var pstats []Propstat + if pf.Propname != nil { + pnames, err := propnames(h.FileSystem, h.LockSystem, reqPath) + if err != nil { + return err + } + pstat := Propstat{Status: http.StatusOK} + for _, xmlname := range pnames { + pstat.Props = append(pstat.Props, Property{XMLName: xmlname}) + } + pstats = append(pstats, pstat) + } else if pf.Allprop != nil { + pstats, err = allprop(h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } else { + pstats, err = props(h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } + if err != nil { + return err + } + return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats)) + } + + walkErr := walkFS(h.FileSystem, depth, reqPath, fi, walkFn) + closeErr := mw.close() + if walkErr != nil { + return http.StatusInternalServerError, walkErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + if _, err := h.FileSystem.Stat(reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + patches, status, err := readProppatch(r.Body) + if err != nil { + return status, err + } + pstats, err := patch(h.FileSystem, h.LockSystem, reqPath, patches) + if err != nil { + return http.StatusInternalServerError, err + } + mw := multistatusWriter{w: w} + writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats)) + closeErr := mw.close() + if writeErr != nil { + return http.StatusInternalServerError, writeErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func makePropstatResponse(href string, pstats []Propstat) *response { + resp := response{ + Href: []string{(&url.URL{Path: href}).EscapedPath()}, + Propstat: make([]propstat, 0, len(pstats)), + } + for _, p := range pstats { + var xmlErr *xmlError + if p.XMLError != "" { + xmlErr = &xmlError{InnerXML: []byte(p.XMLError)} + } + resp.Propstat = append(resp.Propstat, propstat{ + Status: 
fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)), + Prop: p.Props, + ResponseDescription: p.ResponseDescription, + Error: xmlErr, + }) + } + return &resp +} + +const ( + infiniteDepth = -1 + invalidDepth = -2 +) + +// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and +// infiniteDepth. Parsing any other string returns invalidDepth. +// +// Different WebDAV methods have further constraints on valid depths: +// - PROPFIND has no further restrictions, as per section 9.1. +// - COPY accepts only "0" or "infinity", as per section 9.8.3. +// - MOVE accepts only "infinity", as per section 9.9.2. +// - LOCK accepts only "0" or "infinity", as per section 9.10.3. +// These constraints are enforced by the handleXxx methods. +func parseDepth(s string) int { + switch s { + case "0": + return 0 + case "1": + return 1 + case "infinity": + return infiniteDepth + } + return invalidDepth +} + +// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 +const ( + StatusMulti = 207 + StatusUnprocessableEntity = 422 + StatusLocked = 423 + StatusFailedDependency = 424 + StatusInsufficientStorage = 507 +) + +func StatusText(code int) string { + switch code { + case StatusMulti: + return "Multi-Status" + case StatusUnprocessableEntity: + return "Unprocessable Entity" + case StatusLocked: + return "Locked" + case StatusFailedDependency: + return "Failed Dependency" + case StatusInsufficientStorage: + return "Insufficient Storage" + } + return http.StatusText(code) +} + +var ( + errDestinationEqualsSource = errors.New("webdav: destination equals source") + errDirectoryNotEmpty = errors.New("webdav: directory not empty") + errInvalidDepth = errors.New("webdav: invalid depth") + errInvalidDestination = errors.New("webdav: invalid destination") + errInvalidIfHeader = errors.New("webdav: invalid If header") + errInvalidLockInfo = errors.New("webdav: invalid lock info") + errInvalidLockToken = errors.New("webdav: invalid lock token") + errInvalidPropfind = errors.New("webdav: invalid propfind") + errInvalidProppatch = errors.New("webdav: invalid proppatch") + errInvalidResponse = errors.New("webdav: invalid response") + errInvalidTimeout = errors.New("webdav: invalid timeout") + errNoFileSystem = errors.New("webdav: no file system") + errNoLockSystem = errors.New("webdav: no lock system") + errNotADirectory = errors.New("webdav: not a directory") + errPrefixMismatch = errors.New("webdav: prefix mismatch") + errRecursionTooDeep = errors.New("webdav: recursion too deep") + errUnsupportedLockInfo = errors.New("webdav: unsupported lock info") + errUnsupportedMethod = errors.New("webdav: unsupported method") +) diff --git a/vendor/golang.org/x/net/webdav/webdav_test.go b/vendor/golang.org/x/net/webdav/webdav_test.go new file mode 100644 index 00000000000..b068aab323d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav_test.go @@ -0,0 +1,285 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "sort" + "strings" + "testing" +) + +// TODO: add tests to check XML responses with the expected prefix path +func TestPrefix(t *testing.T) { + const dst, blah = "Destination", "blah blah blah" + + // createLockBody comes from the example in Section 9.10.7. 
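+ // It asks for an exclusive write lock and identifies the lock owner by the
+ // href http://example.org/~ejw/contact.html.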
+ const createLockBody = ` + + + + + http://example.org/~ejw/contact.html + + + ` + + do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) { + var bodyReader io.Reader + if body != "" { + bodyReader = strings.NewReader(body) + } + req, err := http.NewRequest(method, urlStr, bodyReader) + if err != nil { + return nil, err + } + for len(headers) >= 2 { + req.Header.Add(headers[0], headers[1]) + headers = headers[2:] + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != wantStatusCode { + return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode) + } + return res.Header, nil + } + + prefixes := []string{ + "/", + "/a/", + "/a/b/", + "/a/b/c/", + } + for _, prefix := range prefixes { + fs := NewMemFS() + h := &Handler{ + FileSystem: fs, + LockSystem: NewMemLS(), + } + mux := http.NewServeMux() + if prefix != "/" { + h.Prefix = prefix + } + mux.Handle(prefix, h) + srv := httptest.NewServer(mux) + defer srv.Close() + + // The script is: + // MKCOL /a + // MKCOL /a/b + // PUT /a/b/c + // COPY /a/b/c /a/b/d + // MKCOL /a/b/e + // MOVE /a/b/d /a/b/e/f + // LOCK /a/b/e/g + // PUT /a/b/e/g + // which should yield the (possibly stripped) filenames /a/b/c, + // /a/b/e/f and /a/b/e/g, plus their parent directories. + + wantA := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusMovedPermanently, + "/a/b/": http.StatusNotFound, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil { + t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err) + continue + } + + wantB := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusMovedPermanently, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err) + continue + } + + wantC := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err) + continue + } + + wantD := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil { + t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err) + continue + } + + wantE := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err) + continue + } + + wantF := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil { + t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err) + continue + } + + var lockToken string + wantG := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if h, err := do("LOCK", srv.URL+"/a/b/e/g", createLockBody, wantG); err != nil { + 
t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err) + continue + } else { + lockToken = h.Get("Lock-Token") + } + + ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken) + wantH := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err) + continue + } + + got, err := find(nil, fs, "/") + if err != nil { + t.Errorf("prefix=%-9q find: %v", prefix, err) + continue + } + sort.Strings(got) + want := map[string][]string{ + "/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"}, + "/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"}, + "/a/b/": {"/", "/c", "/e", "/e/f", "/e/g"}, + "/a/b/c/": {"/"}, + }[prefix] + if !reflect.DeepEqual(got, want) { + t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want) + continue + } + } +} + +func TestFilenameEscape(t *testing.T) { + re := regexp.MustCompile(`([^<]*)`) + do := func(method, urlStr string) (string, error) { + req, err := http.NewRequest(method, urlStr, nil) + if err != nil { + return "", err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", err + } + defer res.Body.Close() + + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", err + } + m := re.FindStringSubmatch(string(b)) + if len(m) != 2 { + return "", errors.New("D:href not found") + } + + return m[1], nil + } + + testCases := []struct { + name, want string + }{{ + name: `/foo%bar`, + want: `/foo%25bar`, + }, { + name: `/こんにちわ世界`, + want: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`, + }, { + name: `/Program Files/`, + want: `/Program%20Files`, + }, { + name: `/go+lang`, + want: `/go+lang`, + }, { + name: `/go&lang`, + want: `/go&lang`, + }} + fs := NewMemFS() + for _, tc := range testCases { + if strings.HasSuffix(tc.name, "/") { + if err := fs.Mkdir(tc.name, 0755); err != nil { + t.Fatalf("name=%q: Mkdir: %v", tc.name, err) + } + } else { + f, err := fs.OpenFile(tc.name, os.O_CREATE, 0644) + if err != nil { + t.Fatalf("name=%q: OpenFile: %v", tc.name, err) + } + f.Close() + } + } + + srv := httptest.NewServer(&Handler{ + FileSystem: fs, + LockSystem: NewMemLS(), + }) + defer srv.Close() + + u, err := url.Parse(srv.URL) + if err != nil { + t.Fatal(err) + } + + for _, tc := range testCases { + u.Path = tc.name + got, err := do("PROPFIND", u.String()) + if err != nil { + t.Errorf("name=%q: PROPFIND: %v", tc.name, err) + continue + } + if got != tc.want { + t.Errorf("name=%q: got %q, want %q", tc.name, got, tc.want) + } + } +} diff --git a/vendor/golang.org/x/net/webdav/xml.go b/vendor/golang.org/x/net/webdav/xml.go new file mode 100644 index 00000000000..790dc81696c --- /dev/null +++ b/vendor/golang.org/x/net/webdav/xml.go @@ -0,0 +1,519 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +// The XML encoding is covered by Section 14. +// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "time" + + // As of https://go-review.googlesource.com/#/c/12772/ which was submitted + // in July 2015, this package uses an internal fork of the standard + // library's encoding/xml package, due to changes in the way namespaces + // were encoded. 
Such changes were introduced in the Go 1.5 cycle, but were + // rolled back in response to https://github.com/golang/go/issues/11841 + // + // However, this package's exported API, specifically the Property and + // DeadPropsHolder types, need to refer to the standard library's version + // of the xml.Name type, as code that imports this package cannot refer to + // the internal version. + // + // This file therefore imports both the internal and external versions, as + // ixml and xml, and converts between them. + // + // In the long term, this package should use the standard library's version + // only, and the internal fork deleted, once + // https://github.com/golang/go/issues/13400 is resolved. + ixml "golang.org/x/net/webdav/internal/xml" +) + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo +type lockInfo struct { + XMLName ixml.Name `xml:"lockinfo"` + Exclusive *struct{} `xml:"lockscope>exclusive"` + Shared *struct{} `xml:"lockscope>shared"` + Write *struct{} `xml:"locktype>write"` + Owner owner `xml:"owner"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner +type owner struct { + InnerXML string `xml:",innerxml"` +} + +func readLockInfo(r io.Reader) (li lockInfo, status int, err error) { + c := &countingReader{r: r} + if err = ixml.NewDecoder(c).Decode(&li); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to refresh the lock. + // http://www.webdav.org/specs/rfc4918.html#refreshing-locks + return lockInfo{}, 0, nil + } + err = errInvalidLockInfo + } + return lockInfo{}, http.StatusBadRequest, err + } + // We only support exclusive (non-shared) write locks. In practice, these are + // the only types of locks that seem to matter. + if li.Exclusive == nil || li.Shared != nil || li.Write == nil { + return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo + } + return li, 0, nil +} + +type countingReader struct { + n int + r io.Reader +} + +func (c *countingReader) Read(p []byte) (int, error) { + n, err := c.r.Read(p) + c.n += n + return n, err +} + +func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) { + depth := "infinity" + if ld.ZeroDepth { + depth = "0" + } + timeout := ld.Duration / time.Second + return fmt.Fprintf(w, "\n"+ + "\n"+ + " \n"+ + " \n"+ + " %s\n"+ + " %s\n"+ + " Second-%d\n"+ + " %s\n"+ + " %s\n"+ + "", + depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root), + ) +} + +func escape(s string) string { + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '&', '\'', '<', '>': + b := bytes.NewBuffer(nil) + ixml.EscapeText(b, []byte(s)) + return b.String() + } + } + return s +} + +// Next returns the next token, if any, in the XML stream of d. +// RFC 4918 requires to ignore comments, processing instructions +// and directives. +// http://www.webdav.org/specs/rfc4918.html#property_values +// http://www.webdav.org/specs/rfc4918.html#xml-extensibility +func next(d *ixml.Decoder) (ixml.Token, error) { + for { + t, err := d.Token() + if err != nil { + return t, err + } + switch t.(type) { + case ixml.Comment, ixml.Directive, ixml.ProcInst: + continue + default: + return t, nil + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind) +type propfindProps []xml.Name + +// UnmarshalXML appends the property names enclosed within start to pn. +// +// It returns an error if start does not contain any properties or if +// properties contain values. Character data between properties is ignored. 
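+// For example, a prop element containing <D:displayname/> and <D:getetag/>
+// decodes into those two property names.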
+func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + for { + t, err := next(d) + if err != nil { + return err + } + switch t.(type) { + case ixml.EndElement: + if len(*pn) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + name := t.(ixml.StartElement).Name + t, err = next(d) + if err != nil { + return err + } + if _, ok := t.(ixml.EndElement); !ok { + return fmt.Errorf("unexpected token %T", t) + } + *pn = append(*pn, xml.Name(name)) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind +type propfind struct { + XMLName ixml.Name `xml:"DAV: propfind"` + Allprop *struct{} `xml:"DAV: allprop"` + Propname *struct{} `xml:"DAV: propname"` + Prop propfindProps `xml:"DAV: prop"` + Include propfindProps `xml:"DAV: include"` +} + +func readPropfind(r io.Reader) (pf propfind, status int, err error) { + c := countingReader{r: r} + if err = ixml.NewDecoder(&c).Decode(&pf); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to propfind allprop. + // http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND + return propfind{Allprop: new(struct{})}, 0, nil + } + err = errInvalidPropfind + } + return propfind{}, http.StatusBadRequest, err + } + + if pf.Allprop == nil && pf.Include != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Prop != nil && pf.Propname != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + return pf, 0, nil +} + +// Property represents a single DAV resource property as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties +type Property struct { + // XMLName is the fully qualified name that identifies this property. + XMLName xml.Name + + // Lang is an optional xml:lang attribute. + Lang string `xml:"xml:lang,attr,omitempty"` + + // InnerXML contains the XML representation of the property value. + // See http://www.webdav.org/specs/rfc4918.html#property_values + // + // Property values of complex type or mixed-content must have fully + // expanded XML namespaces or be self-contained with according + // XML namespace declarations. They must not rely on any XML + // namespace declarations within the scope of the XML document, + // even including the DAV: namespace. + InnerXML []byte `xml:",innerxml"` +} + +// ixmlProperty is the same as the Property type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlProperty struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error +// See multistatusWriter for the "D:" namespace prefix. +type xmlError struct { + XMLName ixml.Name `xml:"D:error"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +// See multistatusWriter for the "D:" namespace prefix. 
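+//
+// As a rough, informal sketch (the element layout is implied by the struct
+// tags and MarshalXML below, not a verbatim encoder dump), one propstat
+// carrying a single DAV: displayname property is written as
+//
+//	<D:propstat>
+//	  <D:prop><D:displayname>example</D:displayname></D:prop>
+//	  <D:status>HTTP/1.1 200 OK</D:status>
+//	</D:propstat>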
+type propstat struct { + Prop []Property `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// ixmlPropstat is the same as the propstat type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlPropstat struct { + Prop []ixmlProperty `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace +// before encoding. See multistatusWriter. +func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error { + // Convert from a propstat to an ixmlPropstat. + ixmlPs := ixmlPropstat{ + Prop: make([]ixmlProperty, len(ps.Prop)), + Status: ps.Status, + Error: ps.Error, + ResponseDescription: ps.ResponseDescription, + } + for k, prop := range ps.Prop { + ixmlPs.Prop[k] = ixmlProperty{ + XMLName: ixml.Name(prop.XMLName), + Lang: prop.Lang, + InnerXML: prop.InnerXML, + } + } + + for k, prop := range ixmlPs.Prop { + if prop.XMLName.Space == "DAV:" { + prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local} + ixmlPs.Prop[k] = prop + } + } + // Distinct type to avoid infinite recursion of MarshalXML. + type newpropstat ixmlPropstat + return e.EncodeElement(newpropstat(ixmlPs), start) +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response +// See multistatusWriter for the "D:" namespace prefix. +type response struct { + XMLName ixml.Name `xml:"D:response"` + Href []string `xml:"D:href"` + Propstat []propstat `xml:"D:propstat"` + Status string `xml:"D:status,omitempty"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MultistatusWriter marshals one or more Responses into a XML +// multistatus response. +// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus +// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as +// "DAV:" on this element, is prepended on the nested response, as well as on all +// its nested elements. All property names in the DAV: namespace are prefixed as +// well. This is because some versions of Mini-Redirector (on windows 7) ignore +// elements with a default namespace (no prefixed namespace). A less intrusive fix +// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177 +type multistatusWriter struct { + // ResponseDescription contains the optional responsedescription + // of the multistatus XML element. Only the latest content before + // close will be emitted. Empty response descriptions are not + // written. + responseDescription string + + w http.ResponseWriter + enc *ixml.Encoder +} + +// Write validates and emits a DAV response as part of a multistatus response +// element. +// +// It sets the HTTP status code of its underlying http.ResponseWriter to 207 +// (Multi-Status) and populates the Content-Type header. If r is the +// first, valid response to be written, Write prepends the XML representation +// of r with a multistatus tag. Callers must call close after the last response +// has been written. 
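+//
+// A minimal usage sketch (rw and responses are hypothetical local names;
+// rw stands for an http.ResponseWriter):
+//
+//	mw := multistatusWriter{w: rw}
+//	for i := range responses {
+//		if err := mw.write(&responses[i]); err != nil {
+//			return err
+//		}
+//	}
+//	return mw.close()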
+func (w *multistatusWriter) write(r *response) error { + switch len(r.Href) { + case 0: + return errInvalidResponse + case 1: + if len(r.Propstat) > 0 != (r.Status == "") { + return errInvalidResponse + } + default: + if len(r.Propstat) > 0 || r.Status == "" { + return errInvalidResponse + } + } + err := w.writeHeader() + if err != nil { + return err + } + return w.enc.Encode(r) +} + +// writeHeader writes a XML multistatus start element on w's underlying +// http.ResponseWriter and returns the result of the write operation. +// After the first write attempt, writeHeader becomes a no-op. +func (w *multistatusWriter) writeHeader() error { + if w.enc != nil { + return nil + } + w.w.Header().Add("Content-Type", "text/xml; charset=utf-8") + w.w.WriteHeader(StatusMulti) + _, err := fmt.Fprintf(w.w, ``) + if err != nil { + return err + } + w.enc = ixml.NewEncoder(w.w) + return w.enc.EncodeToken(ixml.StartElement{ + Name: ixml.Name{ + Space: "DAV:", + Local: "multistatus", + }, + Attr: []ixml.Attr{{ + Name: ixml.Name{Space: "xmlns", Local: "D"}, + Value: "DAV:", + }}, + }) +} + +// Close completes the marshalling of the multistatus response. It returns +// an error if the multistatus response could not be completed. If both the +// return value and field enc of w are nil, then no multistatus response has +// been written. +func (w *multistatusWriter) close() error { + if w.enc == nil { + return nil + } + var end []ixml.Token + if w.responseDescription != "" { + name := ixml.Name{Space: "DAV:", Local: "responsedescription"} + end = append(end, + ixml.StartElement{Name: name}, + ixml.CharData(w.responseDescription), + ixml.EndElement{Name: name}, + ) + } + end = append(end, ixml.EndElement{ + Name: ixml.Name{Space: "DAV:", Local: "multistatus"}, + }) + for _, t := range end { + err := w.enc.EncodeToken(t) + if err != nil { + return err + } + } + return w.enc.Flush() +} + +var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} + +func xmlLang(s ixml.StartElement, d string) string { + for _, attr := range s.Attr { + if attr.Name == xmlLangName { + return attr.Value + } + } + return d +} + +type xmlValue []byte + +func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + // The XML value of a property can be arbitrary, mixed-content XML. + // To make sure that the unmarshalled value contains all required + // namespaces, we encode all the property value XML tokens into a + // buffer. This forces the encoder to redeclare any used namespaces. + var b bytes.Buffer + e := ixml.NewEncoder(&b) + for { + t, err := next(d) + if err != nil { + return err + } + if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name { + break + } + if err = e.EncodeToken(t); err != nil { + return err + } + } + err := e.Flush() + if err != nil { + return err + } + *v = b.Bytes() + return nil +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) +type proppatchProps []Property + +// UnmarshalXML appends the property names and values enclosed within start +// to ps. +// +// An xml:lang attribute that is defined either on the DAV:prop or property +// name XML element is propagated to the property's Lang field. +// +// UnmarshalXML returns an error if start does not contain any properties or if +// property values contain syntactically incorrect XML. 
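+//
+// For illustration (the Z: namespace URI below is only an example), a body
+// such as
+//
+//	<D:propertyupdate xmlns:D="DAV:" xmlns:Z="http://ns.example.com/z/">
+//	  <D:set><D:prop><Z:Authors>somevalue</Z:Authors></D:prop></D:set>
+//	</D:propertyupdate>
+//
+// yields, via readProppatch below, one Proppatch whose Props hold a single
+// Property with XMLName {Space: "http://ns.example.com/z/", Local: "Authors"}
+// and InnerXML "somevalue".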
+func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + lang := xmlLang(start, "") + for { + t, err := next(d) + if err != nil { + return err + } + switch elem := t.(type) { + case ixml.EndElement: + if len(*ps) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + p := Property{ + XMLName: xml.Name(t.(ixml.StartElement).Name), + Lang: xmlLang(t.(ixml.StartElement), lang), + } + err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem) + if err != nil { + return err + } + *ps = append(*ps, p) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove +type setRemove struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + Prop proppatchProps `xml:"DAV: prop"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate +type propertyupdate struct { + XMLName ixml.Name `xml:"DAV: propertyupdate"` + Lang string `xml:"xml:lang,attr,omitempty"` + SetRemove []setRemove `xml:",any"` +} + +func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) { + var pu propertyupdate + if err = ixml.NewDecoder(r).Decode(&pu); err != nil { + return nil, http.StatusBadRequest, err + } + for _, op := range pu.SetRemove { + remove := false + switch op.XMLName { + case ixml.Name{Space: "DAV:", Local: "set"}: + // No-op. + case ixml.Name{Space: "DAV:", Local: "remove"}: + for _, p := range op.Prop { + if len(p.InnerXML) > 0 { + return nil, http.StatusBadRequest, errInvalidProppatch + } + } + remove = true + default: + return nil, http.StatusBadRequest, errInvalidProppatch + } + patches = append(patches, Proppatch{Remove: remove, Props: op.Prop}) + } + return patches, 0, nil +} diff --git a/vendor/golang.org/x/net/webdav/xml_test.go b/vendor/golang.org/x/net/webdav/xml_test.go new file mode 100644 index 00000000000..a3d9e1ed89e --- /dev/null +++ b/vendor/golang.org/x/net/webdav/xml_test.go @@ -0,0 +1,906 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package webdav + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "testing" + + ixml "golang.org/x/net/webdav/internal/xml" +) + +func TestReadLockInfo(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + wantLI lockInfo + wantStatus int + }{{ + "bad: junk", + "xxx", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid owner XML", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " no end tag \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid UTF-8", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \xff \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #1", + "" + + "\n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #2", + "" + + "\n" + + " \n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "good: empty", + "", + lockInfo{}, + 0, + }, { + "good: plain-text owner", + "" + + "\n" + + " \n" + + " \n" + + " gopher\n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "gopher", + }, + }, + 0, + }, { + "section 9.10.7", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " http://example.org/~ejw/contact.html\n" + + " \n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "\n http://example.org/~ejw/contact.html\n ", + }, + }, + 0, + }} + + for _, tc := range testCases { + li, status, err := readLockInfo(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus { + t.Errorf("%s:\ngot lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v", + tc.desc, li, status, tc.wantLI, tc.wantStatus) + continue + } + } +} + +func TestReadPropfind(t *testing.T) { + testCases := []struct { + desc string + input string + wantPF propfind + wantStatus int + }{{ + desc: "propfind: propname", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: empty body means allprop", + input: "", + wantPF: propfind{ + Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop followed by include", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: include followed by allprop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: 
"propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: prop with ignored comments", + input: "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored whitespace", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored mixed-content", + input: "" + + "\n" + + " foobar\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propname with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + " *boss*\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: bad: junk", + input: "xxx", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and allprop (section A.3)", + input: "" + + "\n" + + " " + + " " + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: allprop and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty propfind with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty prop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: prop with just chardata", + input: "" + + "\n" + + " foo\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: interrupted prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: malformed end element prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with chardata value", + input: "" + + "\n" + + " bar\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with whitespace value", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: include without allprop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pf, status, err := readPropfind(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus { + t.Errorf("%s:\ngot propfind=%v, status=%v\nwant propfind=%v, status=%v", + tc.desc, pf, status, tc.wantPF, tc.wantStatus) + continue + } + } +} + +func TestMultistatusWriter(t *testing.T) { + ///The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + responses []response + respdesc string + writeHeader bool + wantXML string + wantCode int + wantErr error + }{{ + desc: "section 9.2.2 (failed dependency)", + 
responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Authors", + }, + }}, + Status: "HTTP/1.1 424 Failed Dependency", + }, { + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Copyright-Owner", + }, + }}, + Status: "HTTP/1.1 409 Conflict", + }}, + ResponseDescription: "Copyright Owner cannot be deleted or altered.", + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 424 Failed Dependency` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 409 Conflict` + + ` ` + + ` Copyright Owner cannot be deleted or altered.` + + `` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.6.2 (lock-token-submitted)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Status: "HTTP/1.1 423 Locked", + Error: &xmlError{ + InnerXML: []byte(``), + }, + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` HTTP/1.1 423 Locked` + + ` ` + + ` ` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.1.3", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"}, + InnerXML: []byte(`` + + `` + + `Box type A` + + ``), + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"}, + InnerXML: []byte(`` + + `` + + `J.J. Johnson` + + ``), + }}, + Status: "HTTP/1.1 200 OK", + }, { + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"}, + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"}, + }}, + Status: "HTTP/1.1 403 Forbidden", + ResponseDescription: "The user does not have access to the DingALing property.", + }}, + }}, + respdesc: "There has been an access violation error.", + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` Box type A` + + ` J.J. 
Johnson` + + ` ` + + ` HTTP/1.1 200 OK` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 403 Forbidden` + + ` The user does not have access to the DingALing property.` + + ` ` + + ` ` + + ` There has been an access violation error.` + + ``, + wantCode: StatusMulti, + }, { + desc: "no response written", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "no response written (with description)", + respdesc: "too bad", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "empty multistatus with header", + writeHeader: true, + wantXML: ``, + wantCode: StatusMulti, + }, { + desc: "bad: no href", + responses: []response{{ + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and no status", + responses: []response{{ + Href: []string{"http://example.com/foo", "http://example.com/bar"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: one href and no propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: status with one href and propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + Status: "HTTP/1.1 200 OK", + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and propstat", + responses: []response{{ + Href: []string{ + "http://example.com/foo", + "http://example.com/bar", + }, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }} + + n := xmlNormalizer{omitWhitespace: true} +loop: + for _, tc := range testCases { + rec := httptest.NewRecorder() + w := multistatusWriter{w: rec, responseDescription: tc.respdesc} + if tc.writeHeader { + if err := w.writeHeader(); err != nil { + t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err) + continue + } + } + for _, r := range tc.responses { + if err := w.write(&r); err != nil { + if err != tc.wantErr { + t.Errorf("%s: got write error %v, want %v", + tc.desc, err, tc.wantErr) + } + continue loop + } + } + if err := w.close(); err != tc.wantErr { + t.Errorf("%s: got close error %v, want %v", + tc.desc, err, tc.wantErr) + continue + } + if rec.Code != tc.wantCode { + t.Errorf("%s: got HTTP status code %d, want %d\n", + tc.desc, rec.Code, tc.wantCode) + continue + } + gotXML := rec.Body.String() + eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML)) + if err != nil { + t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML) + } + } +} + +func TestReadProppatch(t *testing.T) { + ppStr := func(pps []Proppatch) string { + var outer []string + for _, pp := range pps { + var inner []string + for _, p := range pp.Props { + inner = append(inner, 
fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}", + p.XMLName, p.Lang, p.InnerXML)) + } + outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}", + pp.Remove, strings.Join(inner, ", "))) + } + return "[" + strings.Join(outer, ", ") + "]" + } + + testCases := []struct { + desc string + input string + wantPP []Proppatch + wantStatus int + }{{ + desc: "proppatch: section 9.2 (with simple property value)", + input: `` + + `` + + `` + + ` ` + + ` somevalue` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"}, + "", + []byte(`somevalue`), + }}, + }, { + Remove: true, + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"}, + "", + nil, + }}, + }}, + }, { + desc: "proppatch: lang attribute on prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://example.com/ns", Local: "foo"}, + "en", + nil, + }}, + }}, + }, { + desc: "bad: remove with value", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` Jim Whitehead` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty propertyupdate", + input: `` + + `` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pp, status, err := readProppatch(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if status != tc.wantStatus { + t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus) + continue + } + if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus { + t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP)) + } + } +} + +func TestUnmarshalXMLValue(t *testing.T) { + testCases := []struct { + desc string + input string + wantVal string + }{{ + desc: "simple char data", + input: "foo", + wantVal: "foo", + }, { + desc: "empty element", + input: "", + wantVal: "", + }, { + desc: "preserve namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve root element namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve whitespace", + input: " \t ", + wantVal: " \t ", + }, { + desc: "preserve mixed content", + input: ` a `, + wantVal: ` a `, + }, { + desc: "section 9.2", + input: `` + + `` + + ` Jim Whitehead` + + ` Roy Fielding` + + ``, + wantVal: `` + + ` Jim Whitehead` + + ` Roy Fielding`, + }, { + desc: "section 4.3.1 (mixed content)", + input: `` + + `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of ]]>.` + + ` ` + + ``, + wantVal: `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of <RFC2518>.` + + ` `, + }} + + var n xmlNormalizer + for _, tc := range testCases { + d := ixml.NewDecoder(strings.NewReader(tc.input)) + var v xmlValue + if err := d.Decode(&v); err != nil { + t.Errorf("%s: got error %v, want nil", tc.desc, err) + continue + } + eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal)) + if err != nil { + 
t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal) + } + } +} + +// xmlNormalizer normalizes XML. +type xmlNormalizer struct { + // omitWhitespace instructs to ignore whitespace between element tags. + omitWhitespace bool + // omitComments instructs to ignore XML comments. + omitComments bool +} + +// normalize writes the normalized XML content of r to w. It applies the +// following rules +// +// * Rename namespace prefixes according to an internal heuristic. +// * Remove unnecessary namespace declarations. +// * Sort attributes in XML start elements in lexical order of their +// fully qualified name. +// * Remove XML directives and processing instructions. +// * Remove CDATA between XML tags that only contains whitespace, if +// instructed to do so. +// * Remove comments, if instructed to do so. +// +func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { + d := ixml.NewDecoder(r) + e := ixml.NewEncoder(w) + for { + t, err := d.Token() + if err != nil { + if t == nil && err == io.EOF { + break + } + return err + } + switch val := t.(type) { + case ixml.Directive, ixml.ProcInst: + continue + case ixml.Comment: + if n.omitComments { + continue + } + case ixml.CharData: + if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 { + continue + } + case ixml.StartElement: + start, _ := ixml.CopyToken(val).(ixml.StartElement) + attr := start.Attr[:0] + for _, a := range start.Attr { + if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" { + continue + } + attr = append(attr, a) + } + sort.Sort(byName(attr)) + start.Attr = attr + t = start + } + err = e.EncodeToken(t) + if err != nil { + return err + } + } + return e.Flush() +} + +// equalXML tests for equality of the normalized XML contents of a and b. +func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) { + var buf bytes.Buffer + if err := n.normalize(&buf, a); err != nil { + return false, err + } + normA := buf.String() + buf.Reset() + if err := n.normalize(&buf, b); err != nil { + return false, err + } + normB := buf.String() + return normA == normB, nil +} + +type byName []ixml.Attr + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { + if a[i].Name.Space != a[j].Name.Space { + return a[i].Name.Space < a[j].Name.Space + } + return a[i].Name.Local < a[j].Name.Local +} diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go new file mode 100644 index 00000000000..20d1e1e38eb --- /dev/null +++ b/vendor/golang.org/x/net/websocket/client.go @@ -0,0 +1,113 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "crypto/tls" + "io" + "net" + "net/http" + "net/url" +) + +// DialError is an error that occurs while dialling a websocket server. +type DialError struct { + *Config + Err error +} + +func (e *DialError) Error() string { + return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() +} + +// NewConfig creates a new WebSocket config for client connection. 
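+//
+// A hedged usage sketch (the URLs and subprotocol are placeholders):
+//
+//	config, err := NewConfig("ws://example.com/ws", "http://example.com/")
+//	if err != nil {
+//		// handle error
+//	}
+//	config.Protocol = []string{"chat"}
+//	ws, err := DialConfig(config)
+//
+// For the common case, Dial (further below) wraps these steps in one call.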
+func NewConfig(server, origin string) (config *Config, err error) { + config = new(Config) + config.Version = ProtocolVersionHybi13 + config.Location, err = url.ParseRequestURI(server) + if err != nil { + return + } + config.Origin, err = url.ParseRequestURI(origin) + if err != nil { + return + } + config.Header = http.Header(make(map[string][]string)) + return +} + +// NewClient creates a new WebSocket client connection over rwc. +func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + err = hybiClientHandshake(config, br, bw) + if err != nil { + return + } + buf := bufio.NewReadWriter(br, bw) + ws = newHybiClientConn(config, buf, rwc) + return +} + +// Dial opens a new client connection to a WebSocket. +func Dial(url_, protocol, origin string) (ws *Conn, err error) { + config, err := NewConfig(url_, origin) + if err != nil { + return nil, err + } + if protocol != "" { + config.Protocol = []string{protocol} + } + return DialConfig(config) +} + +var portMap = map[string]string{ + "ws": "80", + "wss": "443", +} + +func parseAuthority(location *url.URL) string { + if _, ok := portMap[location.Scheme]; ok { + if _, _, err := net.SplitHostPort(location.Host); err != nil { + return net.JoinHostPort(location.Host, portMap[location.Scheme]) + } + } + return location.Host +} + +// DialConfig opens a new client connection to a WebSocket with a config. +func DialConfig(config *Config) (ws *Conn, err error) { + var client net.Conn + if config.Location == nil { + return nil, &DialError{config, ErrBadWebSocketLocation} + } + if config.Origin == nil { + return nil, &DialError{config, ErrBadWebSocketOrigin} + } + switch config.Location.Scheme { + case "ws": + client, err = net.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + client, err = tls.Dial("tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + if err != nil { + goto Error + } + + ws, err = NewClient(config, client) + if err != nil { + client.Close() + goto Error + } + return + +Error: + return nil, &DialError{config, err} +} diff --git a/vendor/golang.org/x/net/websocket/exampledial_test.go b/vendor/golang.org/x/net/websocket/exampledial_test.go new file mode 100644 index 00000000000..72bb9d48eba --- /dev/null +++ b/vendor/golang.org/x/net/websocket/exampledial_test.go @@ -0,0 +1,31 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "fmt" + "log" + + "golang.org/x/net/websocket" +) + +// This example demonstrates a trivial client. +func ExampleDial() { + origin := "http://localhost/" + url := "ws://localhost:12345/ws" + ws, err := websocket.Dial(url, "", origin) + if err != nil { + log.Fatal(err) + } + if _, err := ws.Write([]byte("hello, world!\n")); err != nil { + log.Fatal(err) + } + var msg = make([]byte, 512) + var n int + if n, err = ws.Read(msg); err != nil { + log.Fatal(err) + } + fmt.Printf("Received: %s.\n", msg[:n]) +} diff --git a/vendor/golang.org/x/net/websocket/examplehandler_test.go b/vendor/golang.org/x/net/websocket/examplehandler_test.go new file mode 100644 index 00000000000..f22a98fcd43 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/examplehandler_test.go @@ -0,0 +1,26 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "io" + "net/http" + + "golang.org/x/net/websocket" +) + +// Echo the data received on the WebSocket. +func EchoServer(ws *websocket.Conn) { + io.Copy(ws, ws) +} + +// This example demonstrates a trivial echo server. +func ExampleHandler() { + http.Handle("/echo", websocket.Handler(EchoServer)) + err := http.ListenAndServe(":12345", nil) + if err != nil { + panic("ListenAndServe: " + err.Error()) + } +} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go new file mode 100644 index 00000000000..8cffdd16c91 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -0,0 +1,583 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +// This file implements a protocol of hybi draft. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + closeStatusNormal = 1000 + closeStatusGoingAway = 1001 + closeStatusProtocolError = 1002 + closeStatusUnsupportedData = 1003 + closeStatusFrameTooLarge = 1004 + closeStatusNoStatusRcvd = 1005 + closeStatusAbnormalClosure = 1006 + closeStatusBadMessageData = 1007 + closeStatusPolicyViolation = 1008 + closeStatusTooBigData = 1009 + closeStatusExtensionMismatch = 1010 + + maxControlFramePayloadLength = 125 +) + +var ( + ErrBadMaskingKey = &ProtocolError{"bad masking key"} + ErrBadPongMessage = &ProtocolError{"bad pong message"} + ErrBadClosingStatus = &ProtocolError{"bad closing status"} + ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} + ErrNotImplemented = &ProtocolError{"not implemented"} + + handshakeHeader = map[string]bool{ + "Host": true, + "Upgrade": true, + "Connection": true, + "Sec-Websocket-Key": true, + "Sec-Websocket-Origin": true, + "Sec-Websocket-Version": true, + "Sec-Websocket-Protocol": true, + "Sec-Websocket-Accept": true, + } +) + +// A hybiFrameHeader is a frame header as defined in hybi draft. +type hybiFrameHeader struct { + Fin bool + Rsv [3]bool + OpCode byte + Length int64 + MaskingKey []byte + + data *bytes.Buffer +} + +// A hybiFrameReader is a reader for hybi frame. +type hybiFrameReader struct { + reader io.Reader + + header hybiFrameHeader + pos int64 + length int +} + +func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { + n, err = frame.reader.Read(msg) + if frame.header.MaskingKey != nil { + for i := 0; i < n; i++ { + msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] + frame.pos++ + } + } + return n, err +} + +func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } + +func (frame *hybiFrameReader) HeaderReader() io.Reader { + if frame.header.data == nil { + return nil + } + if frame.header.data.Len() == 0 { + return nil + } + return frame.header.data +} + +func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } + +func (frame *hybiFrameReader) Len() (n int) { return frame.length } + +// A hybiFrameReaderFactory creates new frame reader based on its frame type. 
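+//
+// The wire format parsed by NewFrameReader below (hybi draft, section 5.2),
+// summarized here as an aide-memoire:
+//
+//	byte 0:  FIN (1 bit) | RSV1..RSV3 (3 bits) | opcode (4 bits)
+//	byte 1:  MASK (1 bit) | payload length (7 bits; 126 means the next
+//	         2 bytes hold the length, 127 means the next 8 bytes do)
+//	then:    4-byte masking key, present only when MASK is set
+//	then:    payload data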
+type hybiFrameReaderFactory struct { + *bufio.Reader +} + +// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. +// See Section 5.2 Base Framing protocol for detail. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 +func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { + hybiFrame := new(hybiFrameReader) + frame = hybiFrame + var header []byte + var b byte + // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 + for i := 0; i < 3; i++ { + j := uint(6 - i) + hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 + } + hybiFrame.header.OpCode = header[0] & 0x0f + + // Second byte. Mask/Payload len(7bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + mask := (b & 0x80) != 0 + b &= 0x7f + lengthFields := 0 + switch { + case b <= 125: // Payload length 7bits. + hybiFrame.header.Length = int64(b) + case b == 126: // Payload length 7+16bits + lengthFields = 2 + case b == 127: // Payload length 7+64bits + lengthFields = 8 + } + for i := 0; i < lengthFields; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } + header = append(header, b) + hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) + } + if mask { + // Masking key. 4 bytes. + for i := 0; i < 4; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) + } + } + hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) + hybiFrame.header.data = bytes.NewBuffer(header) + hybiFrame.length = len(header) + int(hybiFrame.header.Length) + return +} + +// A HybiFrameWriter is a writer for hybi frame. +type hybiFrameWriter struct { + writer *bufio.Writer + + header *hybiFrameHeader +} + +func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { + var header []byte + var b byte + if frame.header.Fin { + b |= 0x80 + } + for i := 0; i < 3; i++ { + if frame.header.Rsv[i] { + j := uint(6 - i) + b |= 1 << j + } + } + b |= frame.header.OpCode + header = append(header, b) + if frame.header.MaskingKey != nil { + b = 0x80 + } else { + b = 0 + } + lengthFields := 0 + length := len(msg) + switch { + case length <= 125: + b |= byte(length) + case length < 65536: + b |= 126 + lengthFields = 2 + default: + b |= 127 + lengthFields = 8 + } + header = append(header, b) + for i := 0; i < lengthFields; i++ { + j := uint((lengthFields - i - 1) * 8) + b = byte((length >> j) & 0xff) + header = append(header, b) + } + if frame.header.MaskingKey != nil { + if len(frame.header.MaskingKey) != 4 { + return 0, ErrBadMaskingKey + } + header = append(header, frame.header.MaskingKey...) 
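+		// Client-to-server frames must be masked (hybi draft, section 5.3):
+		// the header written below carries the 4-byte key just appended, and
+		// each payload byte is XORed with that key, cycling through its four
+		// bytes (data[i] = msg[i] ^ key[i%4]).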
+ frame.writer.Write(header) + data := make([]byte, length) + for i := range data { + data[i] = msg[i] ^ frame.header.MaskingKey[i%4] + } + frame.writer.Write(data) + err = frame.writer.Flush() + return length, err + } + frame.writer.Write(header) + frame.writer.Write(msg) + err = frame.writer.Flush() + return length, err +} + +func (frame *hybiFrameWriter) Close() error { return nil } + +type hybiFrameWriterFactory struct { + *bufio.Writer + needMaskingKey bool +} + +func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType} + if buf.needMaskingKey { + frameHeader.MaskingKey, err = generateMaskingKey() + if err != nil { + return nil, err + } + } + return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil +} + +type hybiFrameHandler struct { + conn *Conn + payloadType byte +} + +func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { + if handler.conn.IsServerConn() { + // The client MUST mask all frames sent to the server. + if frame.(*hybiFrameReader).header.MaskingKey == nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } else { + // The server MUST NOT mask all frames. + if frame.(*hybiFrameReader).header.MaskingKey != nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } + if header := frame.HeaderReader(); header != nil { + io.Copy(ioutil.Discard, header) + } + switch frame.PayloadType() { + case ContinuationFrame: + frame.(*hybiFrameReader).header.OpCode = handler.payloadType + case TextFrame, BinaryFrame: + handler.payloadType = frame.PayloadType() + case CloseFrame: + return nil, io.EOF + case PingFrame, PongFrame: + b := make([]byte, maxControlFramePayloadLength) + n, err := io.ReadFull(frame, b) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return nil, err + } + io.Copy(ioutil.Discard, frame) + if frame.PayloadType() == PingFrame { + if _, err := handler.WritePong(b[:n]); err != nil { + return nil, err + } + } + return nil, nil + } + return frame, nil +} + +func (handler *hybiFrameHandler) WriteClose(status int) (err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame) + if err != nil { + return err + } + msg := make([]byte, 2) + binary.BigEndian.PutUint16(msg, uint16(status)) + _, err = w.Write(msg) + w.Close() + return err +} + +func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// newHybiConn creates a new WebSocket connection speaking hybi draft protocol. +func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + if buf == nil { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + buf = bufio.NewReadWriter(br, bw) + } + ws := &Conn{config: config, request: request, buf: buf, rwc: rwc, + frameReaderFactory: hybiFrameReaderFactory{buf.Reader}, + frameWriterFactory: hybiFrameWriterFactory{ + buf.Writer, request == nil}, + PayloadType: TextFrame, + defaultCloseStatus: closeStatusNormal} + ws.frameHandler = &hybiFrameHandler{conn: ws} + return ws +} + +// generateMaskingKey generates a masking key for a frame. 
+func generateMaskingKey() (maskingKey []byte, err error) { + maskingKey = make([]byte, 4) + if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil { + return + } + return +} + +// generateNonce generates a nonce consisting of a randomly selected 16-byte +// value that has been base64-encoded. +func generateNonce() (nonce []byte) { + key := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + panic(err) + } + nonce = make([]byte, 24) + base64.StdEncoding.Encode(nonce, key) + return +} + +// removeZone removes IPv6 zone identifer from host. +// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080" +func removeZone(host string) string { + if !strings.HasPrefix(host, "[") { + return host + } + i := strings.LastIndex(host, "]") + if i < 0 { + return host + } + j := strings.LastIndex(host[:i], "%") + if j < 0 { + return host + } + return host[:j] + host[i:] +} + +// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of +// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string. +func getNonceAccept(nonce []byte) (expected []byte, err error) { + h := sha1.New() + if _, err = h.Write(nonce); err != nil { + return + } + if _, err = h.Write([]byte(websocketGUID)); err != nil { + return + } + expected = make([]byte, 28) + base64.StdEncoding.Encode(expected, h.Sum(nil)) + return +} + +// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17 +func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) { + bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n") + + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n") + bw.WriteString("Upgrade: websocket\r\n") + bw.WriteString("Connection: Upgrade\r\n") + nonce := generateNonce() + if config.handshakeData != nil { + nonce = []byte(config.handshakeData["key"]) + } + bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n") + bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n") + + if config.Version != ProtocolVersionHybi13 { + return ErrBadProtocolVersion + } + + bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n") + if len(config.Protocol) > 0 { + bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. 
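+	// The rest of the handshake: write any extra configured headers, flush
+	// the request, then read the response and require a 101 status, websocket
+	// Upgrade/Connection headers, and a Sec-WebSocket-Accept value equal to
+	// getNonceAccept(nonce).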
+ err = config.Header.WriteSubset(bw, handshakeHeader) + if err != nil { + return err + } + + bw.WriteString("\r\n") + if err = bw.Flush(); err != nil { + return err + } + + resp, err := http.ReadResponse(br, &http.Request{Method: "GET"}) + if err != nil { + return err + } + if resp.StatusCode != 101 { + return ErrBadStatus + } + if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" || + strings.ToLower(resp.Header.Get("Connection")) != "upgrade" { + return ErrBadUpgrade + } + expectedAccept, err := getNonceAccept(nonce) + if err != nil { + return err + } + if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) { + return ErrChallengeResponse + } + if resp.Header.Get("Sec-WebSocket-Extensions") != "" { + return ErrUnsupportedExtensions + } + offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol") + if offeredProtocol != "" { + protocolMatched := false + for i := 0; i < len(config.Protocol); i++ { + if config.Protocol[i] == offeredProtocol { + protocolMatched = true + break + } + } + if !protocolMatched { + return ErrBadWebSocketProtocol + } + config.Protocol = []string{offeredProtocol} + } + + return nil +} + +// newHybiClientConn creates a client WebSocket connection after handshake. +func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn { + return newHybiConn(config, buf, rwc, nil) +} + +// A HybiServerHandshaker performs a server handshake using hybi draft protocol. +type hybiServerHandshaker struct { + *Config + accept []byte +} + +func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) { + c.Version = ProtocolVersionHybi13 + if req.Method != "GET" { + return http.StatusMethodNotAllowed, ErrBadRequestMethod + } + // HTTP version can be safely ignored. + + if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" || + !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") { + return http.StatusBadRequest, ErrNotWebSocket + } + + key := req.Header.Get("Sec-Websocket-Key") + if key == "" { + return http.StatusBadRequest, ErrChallengeResponse + } + version := req.Header.Get("Sec-Websocket-Version") + switch version { + case "13": + c.Version = ProtocolVersionHybi13 + default: + return http.StatusBadRequest, ErrBadWebSocketVersion + } + var scheme string + if req.TLS != nil { + scheme = "wss" + } else { + scheme = "ws" + } + c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI()) + if err != nil { + return http.StatusBadRequest, err + } + protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol")) + if protocol != "" { + protocols := strings.Split(protocol, ",") + for i := 0; i < len(protocols); i++ { + c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i])) + } + } + c.accept, err = getNonceAccept([]byte(key)) + if err != nil { + return http.StatusInternalServerError, err + } + return http.StatusSwitchingProtocols, nil +} + +// Origin parses the Origin header in req. +// If the Origin header is not set, it returns nil and nil. +func Origin(config *Config, req *http.Request) (*url.URL, error) { + var origin string + switch config.Version { + case ProtocolVersionHybi13: + origin = req.Header.Get("Origin") + } + if origin == "" { + return nil, nil + } + return url.ParseRequestURI(origin) +} + +func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) { + if len(c.Protocol) > 0 { + if len(c.Protocol) != 1 { + // You need choose a Protocol in Handshake func in Server. 
+ return ErrBadWebSocketProtocol + } + } + buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n") + buf.WriteString("Upgrade: websocket\r\n") + buf.WriteString("Connection: Upgrade\r\n") + buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n") + if len(c.Protocol) > 0 { + buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + if c.Header != nil { + err := c.Header.WriteSubset(buf, handshakeHeader) + if err != nil { + return err + } + } + buf.WriteString("\r\n") + return buf.Flush() +} + +func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiServerConn(c.Config, buf, rwc, request) +} + +// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol. +func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiConn(config, buf, rwc, request) +} diff --git a/vendor/golang.org/x/net/websocket/hybi_test.go b/vendor/golang.org/x/net/websocket/hybi_test.go new file mode 100644 index 00000000000..9504aa2d30b --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi_test.go @@ -0,0 +1,608 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "testing" +) + +// Test the getNonceAccept function with values in +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 +func TestSecWebSocketAccept(t *testing.T) { + nonce := []byte("dGhlIHNhbXBsZSBub25jZQ==") + expected := []byte("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=") + accept, err := getNonceAccept(nonce) + if err != nil { + t.Errorf("getNonceAccept: returned error %v", err) + return + } + if !bytes.Equal(expected, accept) { + t.Errorf("getNonceAccept: expected %q got %q", expected, accept) + } +} + +func TestHybiClientHandshake(t *testing.T) { + type test struct { + url, host string + } + tests := []test{ + {"ws://server.example.com/chat", "server.example.com"}, + {"ws://127.0.0.1/chat", "127.0.0.1"}, + } + if _, err := url.ParseRequestURI("http://[fe80::1%25lo0]"); err == nil { + tests = append(tests, test{"ws://[fe80::1%25lo0]/chat", "[fe80::1]"}) + } + + for _, tt := range tests { + var b bytes.Buffer + bw := bufio.NewWriter(&b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + var config Config + config.Location, err = url.ParseRequestURI(tt.url) + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + config.Version = ProtocolVersionHybi13 + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + if err := hybiClientHandshake(&config, br, bw); err != nil { + t.Fatal("handshake", err) + } + req, err := http.ReadRequest(bufio.NewReader(&b)) + if err != nil { + t.Fatal("read request", err) + } + if req.Method != "GET" { + t.Errorf("request method expected GET, but got %s", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected 
/chat, but got %s", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %s", req.Proto) + } + if req.Host != tt.host { + t.Errorf("request host expected %s, but got %s", tt.host, req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf("%s expected %s, but got %v", k, v, req.Header.Get(k)) + } + } + } +} + +func TestHybiClientHandshakeWithHeader(t *testing.T) { + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + config := new(Config) + config.Location, err = url.ParseRequestURI("ws://server.example.com/chat") + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + config.Version = ProtocolVersionHybi13 + config.Header = http.Header(make(map[string][]string)) + config.Header.Add("User-Agent", "test") + + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + err = hybiClientHandshake(config, br, bw) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + req, err := http.ReadRequest(bufio.NewReader(b)) + if err != nil { + t.Fatalf("read request: %v", err) + } + if req.Method != "GET" { + t.Errorf("request method expected GET, but got %q", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %q", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %q", req.Proto) + } + if req.Host != "server.example.com" { + t.Errorf("request Host expected server.example.com, but got %v", req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + "User-Agent": "test", + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k))) + } + } +} + +func TestHybiServerHandshake(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + expectedProtocols := []string{"chat", "superchat"} + if 
fmt.Sprintf("%v", config.Protocol) != fmt.Sprintf("%v", expectedProtocols) { + t.Errorf("protocol expected %q but got %q", expectedProtocols, config.Protocol) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + config.Protocol = config.Protocol[:1] + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "Sec-WebSocket-Protocol: chat", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeNoSubProtocol(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + if len(config.Protocol) != 0 { + t.Errorf("len(config.Protocol) expected 0, but got %q", len(config.Protocol)) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeHybiBadVersion(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Sec-WebSocket-Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 9 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != ErrBadWebSocketVersion { + t.Errorf("handshake expected err %q but got %q", ErrBadWebSocketVersion, err) + } + if code != http.StatusBadRequest { + t.Errorf("status expected %q but got %q", http.StatusBadRequest, code) + } +} + +func testHybiFrame(t *testing.T, testHeader, testPayload, testMaskedPayload []byte, frameHeader *hybiFrameHeader) { + b := bytes.NewBuffer([]byte{}) + frameWriterFactory := &hybiFrameWriterFactory{bufio.NewWriter(b), false} + w, _ := frameWriterFactory.NewFrameWriter(TextFrame) + w.(*hybiFrameWriter).header = frameHeader + _, err := w.Write(testPayload) + w.Close() + if err != nil { + t.Errorf("Write error %q", err) + } + var expectedFrame []byte + expectedFrame = append(expectedFrame, testHeader...) + expectedFrame = append(expectedFrame, testMaskedPayload...) 
+ if !bytes.Equal(expectedFrame, b.Bytes()) { + t.Errorf("frame expected %q got %q", expectedFrame, b.Bytes()) + } + frameReaderFactory := &hybiFrameReaderFactory{bufio.NewReader(b)} + r, err := frameReaderFactory.NewFrameReader() + if err != nil { + t.Errorf("Read error %q", err) + } + if header := r.HeaderReader(); header == nil { + t.Errorf("no header") + } else { + actualHeader := make([]byte, r.Len()) + n, err := header.Read(actualHeader) + if err != nil { + t.Errorf("Read header error %q", err) + } else { + if n < len(testHeader) { + t.Errorf("header too short %q got %q", testHeader, actualHeader[:n]) + } + if !bytes.Equal(testHeader, actualHeader[:n]) { + t.Errorf("header expected %q got %q", testHeader, actualHeader[:n]) + } + } + } + if trailer := r.TrailerReader(); trailer != nil { + t.Errorf("unexpected trailer %q", trailer) + } + frame := r.(*hybiFrameReader) + if frameHeader.Fin != frame.header.Fin || + frameHeader.OpCode != frame.header.OpCode || + len(testPayload) != int(frame.header.Length) { + t.Errorf("mismatch %v (%d) vs %v", frameHeader, len(testPayload), frame) + } + payload := make([]byte, len(testPayload)) + _, err = r.Read(payload) + if err != nil && err != io.EOF { + t.Errorf("read %v", err) + } + if !bytes.Equal(testPayload, payload) { + t.Errorf("payload %q vs %q", testPayload, payload) + } +} + +func TestHybiShortTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x81, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x81, 125}, payload, payload, frameHeader) +} + +func TestHybiShortMaskedTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame, + MaskingKey: []byte{0xcc, 0x55, 0x80, 0x20}} + payload := []byte("hello") + maskedPayload := []byte{0xa4, 0x30, 0xec, 0x4c, 0xa3} + header := []byte{0x81, 0x85} + header = append(header, frameHeader.MaskingKey...) 
+ testHybiFrame(t, header, payload, maskedPayload, frameHeader) +} + +func TestHybiShortBinaryFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: BinaryFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x82, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x82, 125}, payload, payload, frameHeader) +} + +func TestHybiControlFrame(t *testing.T) { + payload := []byte("hello") + + frameHeader := &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: CloseFrame} + payload = []byte{0x03, 0xe8} // 1000 + testHybiFrame(t, []byte{0x88, 0x02}, payload, payload, frameHeader) +} + +func TestHybiLongFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := make([]byte, 126) + testHybiFrame(t, []byte{0x81, 126, 0x00, 126}, payload, payload, frameHeader) + + payload = make([]byte, 65535) + testHybiFrame(t, []byte{0x81, 126, 0xff, 0xff}, payload, payload, frameHeader) + + payload = make([]byte, 65536) + testHybiFrame(t, []byte{0x81, 127, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}, payload, payload, frameHeader) +} + +func TestHybiClientRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[2:7], msg[:n]) { + t.Errorf("read 1st frame %v, got %v", wireData[2:7], msg[:n]) + } + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[16:21], msg[:n]) { + t.Errorf("read 2nd frame %v, got %v", wireData[16:21], msg[:n]) + } + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiShortRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + step := 0 + pos := 0 + expectedPos := []int{2, 5, 16, 19} + expectedLen := []int{3, 2, 3, 2} + for { + msg := make([]byte, 3) + n, err := conn.Read(msg) + if step >= len(expectedPos) { + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } + return + } + pos = expectedPos[step] + endPos := pos + expectedLen[step] + if err != nil { + t.Errorf("read from %d, got error %q", pos, err) + return + } + if 
n != endPos-pos { + t.Errorf("read from %d, expect %d, got %d", pos, endPos-pos, n) + } + if !bytes.Equal(wireData[pos:endPos], msg[:n]) { + t.Errorf("read from %d, frame %v, got %v", pos, wireData[pos:endPos], msg[:n]) + } + step++ + } +} + +func TestHybiServerRead(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + 0x89, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // ping: hello + 0x81, 0x85, 0xed, 0x83, 0xb4, 0x24, + 0x9a, 0xec, 0xc6, 0x48, 0x89, // world + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + + expected := [][]byte{[]byte("hello"), []byte("world")} + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[0], msg[:n]) { + t.Errorf("read 1st frame %q, got %q", expected[0], msg[:n]) + } + + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[1], msg[:n]) { + t.Errorf("read 2nd frame %q, got %q", expected[1], msg[:n]) + } + + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiServerReadWithoutMasking(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + // server MUST close the connection upon receiving a non-masked frame. + msg := make([]byte, 512) + _, err := conn.Read(msg) + if err != io.EOF { + t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) + } +} + +func TestHybiClientReadWithMasking(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + // client MUST close the connection upon receiving a masked frame. 
+ msg := make([]byte, 512) + _, err := conn.Read(msg) + if err != io.EOF { + t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) + } +} + +// Test the hybiServerHandshaker supports firefox implementation and +// checks Connection request header include (but it's not necessary +// equal to) "upgrade" +func TestHybiServerFirefoxHandshake(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: keep-alive, upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + config.Protocol = []string{"chat"} + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "Sec-WebSocket-Protocol: chat", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go new file mode 100644 index 00000000000..0895dea1905 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/server.go @@ -0,0 +1,113 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "fmt" + "io" + "net/http" +) + +func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) { + var hs serverHandshaker = &hybiServerHandshaker{Config: config} + code, err := hs.ReadHandshake(buf.Reader, req) + if err == ErrBadWebSocketVersion { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if err != nil { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if handshake != nil { + err = handshake(config, req) + if err != nil { + code = http.StatusForbidden + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + } + err = hs.AcceptHandshake(buf.Writer) + if err != nil { + code = http.StatusBadRequest + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + conn = hs.NewServerConn(buf, rwc, req) + return +} + +// Server represents a server of a WebSocket. +type Server struct { + // Config is a WebSocket configuration for new WebSocket connection. + Config + + // Handshake is an optional function in WebSocket handshake. 
+ // For example, you can check, or don't check Origin header. + // Another example, you can select config.Protocol. + Handshake func(*Config, *http.Request) error + + // Handler handles a WebSocket connection. + Handler +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.serveWebSocket(w, req) +} + +func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) { + rwc, buf, err := w.(http.Hijacker).Hijack() + if err != nil { + panic("Hijack failed: " + err.Error()) + } + // The server should abort the WebSocket connection if it finds + // the client did not send a handshake that matches with protocol + // specification. + defer rwc.Close() + conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake) + if err != nil { + return + } + if conn == nil { + panic("unexpected nil conn") + } + s.Handler(conn) +} + +// Handler is a simple interface to a WebSocket browser client. +// It checks if Origin header is valid URL by default. +// You might want to verify websocket.Conn.Config().Origin in the func. +// If you use Server instead of Handler, you could call websocket.Origin and +// check the origin in your Handshake func. So, if you want to accept +// non-browser clients, which do not send an Origin header, set a +// Server.Handshake that does not check the origin. +type Handler func(*Conn) + +func checkOrigin(config *Config, req *http.Request) (err error) { + config.Origin, err = Origin(config, req) + if err == nil && config.Origin == nil { + return fmt.Errorf("null origin") + } + return err +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s := Server{Handler: h, Handshake: checkOrigin} + s.serveWebSocket(w, req) +} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go new file mode 100644 index 00000000000..9412191ded5 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -0,0 +1,411 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements a client and server for the WebSocket protocol +// as specified in RFC 6455. +package websocket // import "golang.org/x/net/websocket" + +import ( + "bufio" + "crypto/tls" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +const ( + ProtocolVersionHybi13 = 13 + ProtocolVersionHybi = ProtocolVersionHybi13 + SupportedProtocolVersion = "13" + + ContinuationFrame = 0 + TextFrame = 1 + BinaryFrame = 2 + CloseFrame = 8 + PingFrame = 9 + PongFrame = 10 + UnknownFrame = 255 +) + +// ProtocolError represents WebSocket protocol errors. 
+type ProtocolError struct { + ErrorString string +} + +func (err *ProtocolError) Error() string { return err.ErrorString } + +var ( + ErrBadProtocolVersion = &ProtocolError{"bad protocol version"} + ErrBadScheme = &ProtocolError{"bad scheme"} + ErrBadStatus = &ProtocolError{"bad status"} + ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"} + ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"} + ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"} + ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"} + ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"} + ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"} + ErrBadFrame = &ProtocolError{"bad frame"} + ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"} + ErrNotWebSocket = &ProtocolError{"not websocket protocol"} + ErrBadRequestMethod = &ProtocolError{"bad method"} + ErrNotSupported = &ProtocolError{"not supported"} +) + +// Addr is an implementation of net.Addr for WebSocket. +type Addr struct { + *url.URL +} + +// Network returns the network type for a WebSocket, "websocket". +func (addr *Addr) Network() string { return "websocket" } + +// Config is a WebSocket configuration +type Config struct { + // A WebSocket server address. + Location *url.URL + + // A Websocket client origin. + Origin *url.URL + + // WebSocket subprotocols. + Protocol []string + + // WebSocket protocol version. + Version int + + // TLS config for secure WebSocket (wss). + TlsConfig *tls.Config + + // Additional header fields to be sent in WebSocket opening handshake. + Header http.Header + + handshakeData map[string]string +} + +// serverHandshaker is an interface to handle WebSocket server side handshake. +type serverHandshaker interface { + // ReadHandshake reads handshake request message from client. + // Returns http response code and error if any. + ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) + + // AcceptHandshake accepts the client handshake request and sends + // handshake response back to client. + AcceptHandshake(buf *bufio.Writer) (err error) + + // NewServerConn creates a new WebSocket connection. + NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn) +} + +// frameReader is an interface to read a WebSocket frame. +type frameReader interface { + // Reader is to read payload of the frame. + io.Reader + + // PayloadType returns payload type. + PayloadType() byte + + // HeaderReader returns a reader to read header of the frame. + HeaderReader() io.Reader + + // TrailerReader returns a reader to read trailer of the frame. + // If it returns nil, there is no trailer in the frame. + TrailerReader() io.Reader + + // Len returns total length of the frame, including header and trailer. + Len() int +} + +// frameReaderFactory is an interface to creates new frame reader. +type frameReaderFactory interface { + NewFrameReader() (r frameReader, err error) +} + +// frameWriter is an interface to write a WebSocket frame. +type frameWriter interface { + // Writer is to write payload of the frame. + io.WriteCloser +} + +// frameWriterFactory is an interface to create new frame writer. +type frameWriterFactory interface { + NewFrameWriter(payloadType byte) (w frameWriter, err error) +} + +type frameHandler interface { + HandleFrame(frame frameReader) (r frameReader, err error) + WriteClose(status int) (err error) +} + +// Conn represents a WebSocket connection. 
+// +// Multiple goroutines may invoke methods on a Conn simultaneously. +type Conn struct { + config *Config + request *http.Request + + buf *bufio.ReadWriter + rwc io.ReadWriteCloser + + rio sync.Mutex + frameReaderFactory + frameReader + + wio sync.Mutex + frameWriterFactory + + frameHandler + PayloadType byte + defaultCloseStatus int +} + +// Read implements the io.Reader interface: +// it reads data of a frame from the WebSocket connection. +// if msg is not large enough for the frame data, it fills the msg and next Read +// will read the rest of the frame data. +// it reads Text frame or Binary frame. +func (ws *Conn) Read(msg []byte) (n int, err error) { + ws.rio.Lock() + defer ws.rio.Unlock() +again: + if ws.frameReader == nil { + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return 0, err + } + ws.frameReader, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return 0, err + } + if ws.frameReader == nil { + goto again + } + } + n, err = ws.frameReader.Read(msg) + if err == io.EOF { + if trailer := ws.frameReader.TrailerReader(); trailer != nil { + io.Copy(ioutil.Discard, trailer) + } + ws.frameReader = nil + goto again + } + return n, err +} + +// Write implements the io.Writer interface: +// it writes data as a frame to the WebSocket connection. +func (ws *Conn) Write(msg []byte) (n int, err error) { + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// Close implements the io.Closer interface. +func (ws *Conn) Close() error { + err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) + err1 := ws.rwc.Close() + if err != nil { + return err + } + return err1 +} + +func (ws *Conn) IsClientConn() bool { return ws.request == nil } +func (ws *Conn) IsServerConn() bool { return ws.request != nil } + +// LocalAddr returns the WebSocket Origin for the connection for client, or +// the WebSocket location for server. +func (ws *Conn) LocalAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Origin} + } + return &Addr{ws.config.Location} +} + +// RemoteAddr returns the WebSocket location for the connection for client, or +// the Websocket Origin for server. +func (ws *Conn) RemoteAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Location} + } + return &Addr{ws.config.Origin} +} + +var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") + +// SetDeadline sets the connection's network read & write deadlines. +func (ws *Conn) SetDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetDeadline(t) + } + return errSetDeadline +} + +// SetReadDeadline sets the connection's network read deadline. +func (ws *Conn) SetReadDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetReadDeadline(t) + } + return errSetDeadline +} + +// SetWriteDeadline sets the connection's network write deadline. +func (ws *Conn) SetWriteDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetWriteDeadline(t) + } + return errSetDeadline +} + +// Config returns the WebSocket config. +func (ws *Conn) Config() *Config { return ws.config } + +// Request returns the http request upgraded to the WebSocket. +// It is nil for client side. +func (ws *Conn) Request() *http.Request { return ws.request } + +// Codec represents a symmetric pair of functions that implement a codec. 
+type Codec struct { + Marshal func(v interface{}) (data []byte, payloadType byte, err error) + Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) +} + +// Send sends v marshaled by cd.Marshal as single frame to ws. +func (cd Codec) Send(ws *Conn, v interface{}) (err error) { + data, payloadType, err := cd.Marshal(v) + if err != nil { + return err + } + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) + if err != nil { + return err + } + _, err = w.Write(data) + w.Close() + return err +} + +// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores in v. +func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { + ws.rio.Lock() + defer ws.rio.Unlock() + if ws.frameReader != nil { + _, err = io.Copy(ioutil.Discard, ws.frameReader) + if err != nil { + return err + } + ws.frameReader = nil + } +again: + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return err + } + frame, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return err + } + if frame == nil { + goto again + } + payloadType := frame.PayloadType() + data, err := ioutil.ReadAll(frame) + if err != nil { + return err + } + return cd.Unmarshal(data, payloadType, v) +} + +func marshal(v interface{}) (msg []byte, payloadType byte, err error) { + switch data := v.(type) { + case string: + return []byte(data), TextFrame, nil + case []byte: + return data, BinaryFrame, nil + } + return nil, UnknownFrame, ErrNotSupported +} + +func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + switch data := v.(type) { + case *string: + *data = string(msg) + return nil + case *[]byte: + *data = msg + return nil + } + return ErrNotSupported +} + +/* +Message is a codec to send/receive text/binary data in a frame on WebSocket connection. +To send/receive text frame, use string type. +To send/receive binary frame, use []byte type. + +Trivial usage: + + import "websocket" + + // receive text frame + var message string + websocket.Message.Receive(ws, &message) + + // send text frame + message = "hello" + websocket.Message.Send(ws, message) + + // receive binary frame + var data []byte + websocket.Message.Receive(ws, &data) + + // send binary frame + data = []byte{0, 1, 2} + websocket.Message.Send(ws, data) + +*/ +var Message = Codec{marshal, unmarshal} + +func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { + msg, err = json.Marshal(v) + return msg, TextFrame, err +} + +func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + return json.Unmarshal(msg, v) +} + +/* +JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. + +Trivial usage: + + import "websocket" + + type T struct { + Msg string + Count int + } + + // receive JSON type T + var data T + websocket.JSON.Receive(ws, &data) + + // send JSON type T + websocket.JSON.Send(ws, data) +*/ +var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/net/websocket/websocket_test.go b/vendor/golang.org/x/net/websocket/websocket_test.go new file mode 100644 index 00000000000..05b7e5356e4 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket_test.go @@ -0,0 +1,587 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bytes" + "fmt" + "io" + "log" + "net" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +var serverAddr string +var once sync.Once + +func echoServer(ws *Conn) { + defer ws.Close() + io.Copy(ws, ws) +} + +type Count struct { + S string + N int +} + +func countServer(ws *Conn) { + defer ws.Close() + for { + var count Count + err := JSON.Receive(ws, &count) + if err != nil { + return + } + count.N++ + count.S = strings.Repeat(count.S, count.N) + err = JSON.Send(ws, count) + if err != nil { + return + } + } +} + +type testCtrlAndDataHandler struct { + hybiFrameHandler +} + +func (h *testCtrlAndDataHandler) WritePing(b []byte) (int, error) { + h.hybiFrameHandler.conn.wio.Lock() + defer h.hybiFrameHandler.conn.wio.Unlock() + w, err := h.hybiFrameHandler.conn.frameWriterFactory.NewFrameWriter(PingFrame) + if err != nil { + return 0, err + } + n, err := w.Write(b) + w.Close() + return n, err +} + +func ctrlAndDataServer(ws *Conn) { + defer ws.Close() + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + go func() { + for i := 0; ; i++ { + var b []byte + if i%2 != 0 { // with or without payload + b = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-SERVER", i)) + } + if _, err := h.WritePing(b); err != nil { + break + } + if _, err := h.WritePong(b); err != nil { // unsolicited pong + break + } + time.Sleep(10 * time.Millisecond) + } + }() + + b := make([]byte, 128) + for { + n, err := ws.Read(b) + if err != nil { + break + } + if _, err := ws.Write(b[:n]); err != nil { + break + } + } +} + +func subProtocolHandshake(config *Config, req *http.Request) error { + for _, proto := range config.Protocol { + if proto == "chat" { + config.Protocol = []string{proto} + return nil + } + } + return ErrBadWebSocketProtocol +} + +func subProtoServer(ws *Conn) { + for _, proto := range ws.Config().Protocol { + io.WriteString(ws, proto) + } +} + +func startServer() { + http.Handle("/echo", Handler(echoServer)) + http.Handle("/count", Handler(countServer)) + http.Handle("/ctrldata", Handler(ctrlAndDataServer)) + subproto := Server{ + Handshake: subProtocolHandshake, + Handler: Handler(subProtoServer), + } + http.Handle("/subproto", subproto) + server := httptest.NewServer(nil) + serverAddr = server.Listener.Addr().String() + log.Print("Test WebSocket server listening on ", serverAddr) +} + +func newConfig(t *testing.T, path string) *Config { + config, _ := NewConfig(fmt.Sprintf("ws://%s%s", serverAddr, path), "http://localhost") + return config +} + +func TestEcho(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var actual_msg = make([]byte, 512) + n, err := conn.Read(actual_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + actual_msg = actual_msg[0:n] + if !bytes.Equal(msg, actual_msg) { + t.Errorf("Echo: expected %q got %q", msg, actual_msg) + } + conn.Close() +} + +func TestAddr(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + 
t.Errorf("WebSocket handshake error: %v", err) + return + } + + ra := conn.RemoteAddr().String() + if !strings.HasPrefix(ra, "ws://") || !strings.HasSuffix(ra, "/echo") { + t.Errorf("Bad remote addr: %v", ra) + } + la := conn.LocalAddr().String() + if !strings.HasPrefix(la, "http://") { + t.Errorf("Bad local addr: %v", la) + } + conn.Close() +} + +func TestCount(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/count"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + var count Count + count.S = "hello" + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 1 { + t.Errorf("count: expected %d got %d", 1, count.N) + } + if count.S != "hello" { + t.Errorf("count: expected %q got %q", "hello", count.S) + } + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 2 { + t.Errorf("count: expected %d got %d", 2, count.N) + } + if count.S != "hellohello" { + t.Errorf("count: expected %q got %q", "hellohello", count.S) + } + conn.Close() +} + +func TestWithQuery(t *testing.T) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := newConfig(t, "/echo") + config.Location, err = url.ParseRequestURI(fmt.Sprintf("ws://%s/echo?q=v", serverAddr)) + if err != nil { + t.Fatal("location url", err) + } + + ws, err := NewClient(config, client) + if err != nil { + t.Errorf("WebSocket handshake: %v", err) + return + } + ws.Close() +} + +func testWithProtocol(t *testing.T, subproto []string) (string, error) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := newConfig(t, "/subproto") + config.Protocol = subproto + + ws, err := NewClient(config, client) + if err != nil { + return "", err + } + msg := make([]byte, 16) + n, err := ws.Read(msg) + if err != nil { + return "", err + } + ws.Close() + return string(msg[:n]), nil +} + +func TestWithProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"chat"}) + if err != nil { + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithTwoProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"test", "chat"}) + if err != nil { + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithBadProtocol(t *testing.T) { + _, err := testWithProtocol(t, []string{"test"}) + if err != ErrBadStatus { + t.Errorf("SubProto: expected %v, got %v", ErrBadStatus, err) + } +} + +func TestHTTP(t *testing.T) { + once.Do(startServer) + + // If the client did not send a handshake that matches the protocol + // specification, the server MUST return an HTTP response with an + // appropriate error code (such as 400 Bad Request) + resp, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr)) + if err != nil { + t.Errorf("Get: error %#v", err) + return + } + if resp == nil { + t.Error("Get: resp is null") + return + } + if resp.StatusCode != http.StatusBadRequest { + 
t.Errorf("Get: expected %q got %q", http.StatusBadRequest, resp.StatusCode) + } +} + +func TestTrailingSpaces(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=955 + // The last runs of this create keys with trailing spaces that should not be + // generated by the client. + once.Do(startServer) + config := newConfig(t, "/echo") + for i := 0; i < 30; i++ { + // body + ws, err := DialConfig(config) + if err != nil { + t.Errorf("Dial #%d failed: %v", i, err) + break + } + ws.Close() + } +} + +func TestDialConfigBadVersion(t *testing.T) { + once.Do(startServer) + config := newConfig(t, "/echo") + config.Version = 1234 + + _, err := DialConfig(config) + + if dialerr, ok := err.(*DialError); ok { + if dialerr.Err != ErrBadProtocolVersion { + t.Errorf("dial expected err %q but got %q", ErrBadProtocolVersion, dialerr.Err) + } + } +} + +func TestSmallBuffer(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=1145 + // Read should be able to handle reading a fragment of a frame. + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var small_msg = make([]byte, 8) + n, err := conn.Read(small_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + if !bytes.Equal(msg[:len(small_msg)], small_msg) { + t.Errorf("Echo: expected %q got %q", msg[:len(small_msg)], small_msg) + } + var second_msg = make([]byte, len(msg)) + n, err = conn.Read(second_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + second_msg = second_msg[0:n] + if !bytes.Equal(msg[len(small_msg):], second_msg) { + t.Errorf("Echo: expected %q got %q", msg[len(small_msg):], second_msg) + } + conn.Close() +} + +var parseAuthorityTests = []struct { + in *url.URL + out string +}{ + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com", + }, + "www.google.com:443", + }, + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com:80", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com:443", + }, + "www.google.com:443", + }, + // some invalid ones for parseAuthority. 
parseAuthority doesn't + // concern itself with the scheme unless it actually knows about it + { + &url.URL{ + Scheme: "http", + Host: "www.google.com", + }, + "www.google.com", + }, + { + &url.URL{ + Scheme: "http", + Host: "www.google.com:80", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "127.0.0.1", + }, + "127.0.0.1", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "www.google.com", + }, + "www.google.com", + }, +} + +func TestParseAuthority(t *testing.T) { + for _, tt := range parseAuthorityTests { + out := parseAuthority(tt.in) + if out != tt.out { + t.Errorf("got %v; want %v", out, tt.out) + } + } +} + +type closerConn struct { + net.Conn + closed int // count of the number of times Close was called +} + +func (c *closerConn) Close() error { + c.closed++ + return c.Conn.Close() +} + +func TestClose(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/11454") + } + + once.Do(startServer) + + conn, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + cc := closerConn{Conn: conn} + + client, err := NewClient(newConfig(t, "/echo"), &cc) + if err != nil { + t.Fatalf("WebSocket handshake: %v", err) + } + + // set the deadline to ten minutes ago, which will have expired by the time + // client.Close sends the close status frame. + conn.SetDeadline(time.Now().Add(-10 * time.Minute)) + + if err := client.Close(); err == nil { + t.Errorf("ws.Close(): expected error, got %v", err) + } + if cc.closed < 1 { + t.Fatalf("ws.Close(): expected underlying ws.rwc.Close to be called > 0 times, got: %v", cc.closed) + } +} + +var originTests = []struct { + req *http.Request + origin *url.URL +}{ + { + req: &http.Request{ + Header: http.Header{ + "Origin": []string{"http://www.example.com"}, + }, + }, + origin: &url.URL{ + Scheme: "http", + Host: "www.example.com", + }, + }, + { + req: &http.Request{}, + }, +} + +func TestOrigin(t *testing.T) { + conf := newConfig(t, "/echo") + conf.Version = ProtocolVersionHybi13 + for i, tt := range originTests { + origin, err := Origin(conf, tt.req) + if err != nil { + t.Error(err) + continue + } + if !reflect.DeepEqual(origin, tt.origin) { + t.Errorf("#%d: got origin %v; want %v", i, origin, tt.origin) + continue + } + } +} + +func TestCtrlAndData(t *testing.T) { + once.Do(startServer) + + c, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal(err) + } + ws, err := NewClient(newConfig(t, "/ctrldata"), c) + if err != nil { + t.Fatal(err) + } + defer ws.Close() + + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + b := make([]byte, 128) + for i := 0; i < 2; i++ { + data := []byte(fmt.Sprintf("#%d-DATA-FRAME-FROM-CLIENT", i)) + if _, err := ws.Write(data); err != nil { + t.Fatalf("#%d: %v", i, err) + } + var ctrl []byte + if i%2 != 0 { // with or without payload + ctrl = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-CLIENT", i)) + } + if _, err := h.WritePing(ctrl); err != nil { + t.Fatalf("#%d: %v", i, err) + } + n, err := ws.Read(b) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + if !bytes.Equal(b[:n], data) { + t.Fatalf("#%d: got %v; want %v", i, b[:n], data) + } + } +} diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf.go b/vendor/golang.org/x/net/xsrftoken/xsrf.go new file mode 100644 index 00000000000..8d2187872d5 --- /dev/null +++ b/vendor/golang.org/x/net/xsrftoken/xsrf.go @@ -0,0 +1,88 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xsrftoken provides methods for generating and validating secure XSRF tokens. +package xsrftoken // import "golang.org/x/net/xsrftoken" + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "strconv" + "strings" + "time" +) + +// Timeout is the duration for which XSRF tokens are valid. +// It is exported so clients may set cookie timeouts that match generated tokens. +const Timeout = 24 * time.Hour + +// clean sanitizes a string for inclusion in a token by replacing all ":"s. +func clean(s string) string { + return strings.Replace(s, ":", "_", -1) +} + +// Generate returns a URL-safe secure XSRF token that expires in 24 hours. +// +// key is a secret key for your application. +// userID is a unique identifier for the user. +// actionID is the action the user is taking (e.g. POSTing to a particular path). +func Generate(key, userID, actionID string) string { + return generateTokenAtTime(key, userID, actionID, time.Now()) +} + +// generateTokenAtTime is like Generate, but returns a token that expires 24 hours from now. +func generateTokenAtTime(key, userID, actionID string, now time.Time) string { + // Round time up and convert to milliseconds. + milliTime := (now.UnixNano() + 1e6 - 1) / 1e6 + + h := hmac.New(sha1.New, []byte(key)) + fmt.Fprintf(h, "%s:%s:%d", clean(userID), clean(actionID), milliTime) + + // Get the padded base64 string then removing the padding. + tok := string(h.Sum(nil)) + tok = base64.URLEncoding.EncodeToString([]byte(tok)) + tok = strings.TrimRight(tok, "=") + + return fmt.Sprintf("%s:%d", tok, milliTime) +} + +// Valid reports whether a token is a valid, unexpired token returned by Generate. +func Valid(token, key, userID, actionID string) bool { + return validTokenAtTime(token, key, userID, actionID, time.Now()) +} + +// validTokenAtTime reports whether a token is valid at the given time. +func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool { + // Extract the issue time of the token. + sep := strings.LastIndex(token, ":") + if sep < 0 { + return false + } + millis, err := strconv.ParseInt(token[sep+1:], 10, 64) + if err != nil { + return false + } + issueTime := time.Unix(0, millis*1e6) + + // Check that the token is not expired. + if now.Sub(issueTime) >= Timeout { + return false + } + + // Check that the token is not from the future. + // Allow 1 minute grace period in case the token is being verified on a + // machine whose clock is behind the machine that issued the token. + if issueTime.After(now.Add(1 * time.Minute)) { + return false + } + + expected := generateTokenAtTime(key, userID, actionID, issueTime) + + // Check that the token matches the expected value. + // Use constant time comparison to avoid timing attacks. + return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1 +} diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf_test.go b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go new file mode 100644 index 00000000000..9933f867131 --- /dev/null +++ b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go @@ -0,0 +1,83 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xsrftoken + +import ( + "encoding/base64" + "testing" + "time" +) + +const ( + key = "quay" + userID = "12345678" + actionID = "POST /form" +) + +var ( + now = time.Now() + oneMinuteFromNow = now.Add(1 * time.Minute) +) + +func TestValidToken(t *testing.T) { + tok := generateTokenAtTime(key, userID, actionID, now) + if !validTokenAtTime(tok, key, userID, actionID, oneMinuteFromNow) { + t.Error("One second later: Expected token to be valid") + } + if !validTokenAtTime(tok, key, userID, actionID, now.Add(Timeout-1*time.Nanosecond)) { + t.Error("Just before timeout: Expected token to be valid") + } + if !validTokenAtTime(tok, key, userID, actionID, now.Add(-1*time.Minute+1*time.Millisecond)) { + t.Error("One minute in the past: Expected token to be valid") + } +} + +// TestSeparatorReplacement tests that separators are being correctly substituted +func TestSeparatorReplacement(t *testing.T) { + tok := generateTokenAtTime("foo:bar", "baz", "wah", now) + tok2 := generateTokenAtTime("foo", "bar:baz", "wah", now) + if tok == tok2 { + t.Errorf("Expected generated tokens to be different") + } +} + +func TestInvalidToken(t *testing.T) { + invalidTokenTests := []struct { + name, key, userID, actionID string + t time.Time + }{ + {"Bad key", "foobar", userID, actionID, oneMinuteFromNow}, + {"Bad userID", key, "foobar", actionID, oneMinuteFromNow}, + {"Bad actionID", key, userID, "foobar", oneMinuteFromNow}, + {"Expired", key, userID, actionID, now.Add(Timeout + 1*time.Millisecond)}, + {"More than 1 minute from the future", key, userID, actionID, now.Add(-1*time.Nanosecond - 1*time.Minute)}, + } + + tok := generateTokenAtTime(key, userID, actionID, now) + for _, itt := range invalidTokenTests { + if validTokenAtTime(tok, itt.key, itt.userID, itt.actionID, itt.t) { + t.Errorf("%v: Expected token to be invalid", itt.name) + } + } +} + +// TestValidateBadData primarily tests that no unexpected panics are triggered +// during parsing +func TestValidateBadData(t *testing.T) { + badDataTests := []struct { + name, tok string + }{ + {"Invalid Base64", "ASDab24(@)$*=="}, + {"No delimiter", base64.URLEncoding.EncodeToString([]byte("foobar12345678"))}, + {"Invalid time", base64.URLEncoding.EncodeToString([]byte("foobar:foobar"))}, + {"Wrong length", "1234" + generateTokenAtTime(key, userID, actionID, now)}, + } + + for _, bdt := range badDataTests { + if validTokenAtTime(bdt.tok, key, userID, actionID, oneMinuteFromNow) { + t.Errorf("%v: Expected token to be invalid", bdt.name) + } + } +} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml new file mode 100644 index 00000000000..9bc2c12793c --- /dev/null +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - 1.5.3 + - 1.6 + +before_install: + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + +install: + - mkdir -p "$GOPATH/src/google.golang.org" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc" + +script: + - make test testrace diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 407d384a7c8..36cd6f7581b 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,16 +1,39 @@ # How to contribute -We definitely welcome patches and contribution to grpc! Here is some guideline +We definitely welcome patches and contribution to grpc! 
Here are some guidelines and information about how to do so. -## Getting started +## Sending patches -### Legal requirements +### Getting started + +1. Check out the code: + + $ go get google.golang.org/grpc + $ cd $GOPATH/src/google.golang.org/grpc + +1. Create a fork of the grpc-go repository. +1. Add your fork as a remote: + + $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git + +1. Make changes, commit them. +1. Run the test suite: + + $ make test + +1. Push your changes to your fork: + + $ git push fork ... + +1. Open a pull request. + +## Legal requirements In order to protect both you and ourselves, you will need to sign the [Contributor License Agreement](https://cla.developers.google.com/clas). -### Filing Issues +## Filing Issues When filing an issue, make sure to answer these five questions: 1. What version of Go are you using (`go version`)? diff --git a/vendor/google.golang.org/grpc/Documentation/grpc-auth-support.md b/vendor/google.golang.org/grpc/Documentation/grpc-auth-support.md new file mode 100644 index 00000000000..5312b5725c5 --- /dev/null +++ b/vendor/google.golang.org/grpc/Documentation/grpc-auth-support.md @@ -0,0 +1,41 @@ +# Authentication + +As outlined in the [gRPC authentication guide](http://www.grpc.io/docs/guides/auth.html) there are a number of different mechanisms for asserting identity between an client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it. + +# Enabling TLS on a gRPC client + +```Go +conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""))) +``` + +# Enabling TLS on a gRPC server + +```Go +creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) +if err != nil { + log.Fatalf("Failed to generate credentials %v", err) +} +lis, err := net.Listen("tcp", ":0") +server := grpc.NewServer(grpc.Creds(creds)) +... +server.Serve(lis) +``` + +# Authenticating with Google + +## Google Compute Engine (GCE) + +```Go +conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""), grpc.WithPerRPCCredentials(oauth.NewComputeEngine()))) +``` + +## JWT + +```Go +jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) +if err != nil { + log.Fatalf("Failed to create JWT credentials: %v", err) +} +conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""), grpc.WithPerRPCCredentials(jwtCreds))) +``` + diff --git a/vendor/google.golang.org/grpc/Documentation/grpc-metadata.md b/vendor/google.golang.org/grpc/Documentation/grpc-metadata.md new file mode 100644 index 00000000000..928f5572d12 --- /dev/null +++ b/vendor/google.golang.org/grpc/Documentation/grpc-metadata.md @@ -0,0 +1,201 @@ +# Metadata + +gRPC supports sending metadata between client and server. +This doc shows how to send and receive metadata in gRPC-go. + +## Background + +Four kinds of service method: + +- [Unary RPC](http://www.grpc.io/docs/guides/concepts.html#unary-rpc) +- [Server streaming RPC](http://www.grpc.io/docs/guides/concepts.html#server-streaming-rpc) +- [Client streaming RPC](http://www.grpc.io/docs/guides/concepts.html#client-streaming-rpc) +- [Bidirectional streaming RPC](http://www.grpc.io/docs/guides/concepts.html#bidirectional-streaming-rpc) + +And concept of [metadata](http://www.grpc.io/docs/guides/concepts.html#metadata). 
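+
+Before looking at each API in detail, here is a minimal sketch of the whole flow: the client attaches metadata to its context and the server reads it back. `client.SomeRPC`, `someRequest` and the handler context are stand-ins for whatever generated stub you use; each step is covered in the sections below.
+
+```go
+// Client side: attach metadata to the outgoing context, then make the RPC.
+md := metadata.Pairs("token", "some-secret-token")
+ctx := metadata.NewContext(context.Background(), md)
+response, err := client.SomeRPC(ctx, someRequest)
+
+// Server side, inside the RPC handler: read the metadata back out of its context.
+// ok is false if the client attached no metadata.
+inMD, ok := metadata.FromContext(ctx)
+```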
+ +## Constructing metadata + +A metadata can be created using package [metadata](https://godoc.org/google.golang.org/grpc/metadata). +The type MD is actually a map from string to a list of strings: + +```go +type MD map[string][]string +``` + +Metadata can be read like a normal map. +Note that the value type of this map is `[]string`, +so that users can attach multiple values using a single key. + +### Creating a new metadata + +A metadata can be created from a `map[string]string` using function `New`: + +```go +md := metadata.New(map[string]string{"key1": "val1", "key2": "val2"}) +``` + +Another way is to use `Pairs`. +Values with the same key will be merged into a list: + +```go +md := metadata.Pairs( + "key1", "val1", + "key1", "val1-2", // "key1" will have map value []string{"val1", "val1-2"} + "key2", "val2", +) +``` + +__Note:__ all the keys will be automatically converted to lowercase, +so "key1" and "kEy1" will be the same key and their values will be merged into the same list. +This happens for both `New` and `Pairs`. + +### Storing binary data in metadata + +In metadata, keys are always strings. But values can be strings or binary data. +To store binary data value in metadata, simply add "-bin" surfix to the key. +The values with "-bin" surffixed keys will be encoded when creating the metadata: + +```go +md := metadata.Pairs( + "key", "string value", + "key-bin", string([]byte{96, 102}), // this binary data will be encoded (base64) before sending + // and will be decoded after being transferred. +) +``` + +## Retrieving metadata from context + +Metadata can be retrieved from context using `FromContext`: + +```go +func (s *server) SomeRPC(ctx context.Context, in *pb.SomeRequest) (*pb.SomeResponse, err) { + md, ok := metadata.FromContext(ctx) + // do something with metadata +} +``` + +## Sending and receiving metadata - client side + +[//]: # "TODO: uncomment next line after example source added" +[//]: # "Real metadata sending and receiving examples are available [here](TODO:example_dir)." + +### Sending metadata + +To send metadata to server, the client can wrap the metadata into a context using `NewContext`, and make the RPC with this context: + +```go +md := metadata.Pairs("key", "val") + +// create a new context with this metadata +ctx := metadata.NewContext(context.Background(), md) + +// make unary RPC +response, err := client.SomeRPC(ctx, someRequest) + +// or make streaming RPC +stream, err := client.SomeStreamingRPC(ctx) +``` +### Receiving metadata + +Metadata that a client can receive includes header and trailer. 
+ +#### Unary call + +Header and trailer sent along with a unary call can be retrieved using function [Header](https://godoc.org/google.golang.org/grpc#Header) and [Trailer](https://godoc.org/google.golang.org/grpc#Trailer) in [CallOption](https://godoc.org/google.golang.org/grpc#CallOption): + +```go +var header, trailer metadata.MD // variable to store header and trailer +r, err := client.SomeRPC( + ctx, + someRequest, + grpc.Header(&header), // will retrieve header + grpc.Trailer(&trailer), // will retrieve trailer +) + +// do something with header and trailer +``` + +#### Streaming call + +For streaming calls including: + +- Server streaming RPC +- Client streaming RPC +- Bidirectional streaming RPC + +Header and trailer can be retrieved from the returned stream using function `Header` and `Trailer` in interface [ClientStream](https://godoc.org/google.golang.org/grpc#ClientStream): + +```go +stream, err := client.SomeStreamingRPC(ctx) + +// retrieve header +header, err := stream.Header() + +// retrieve trailer +trailer := stream.Trailer() + +``` + +## Sending and receiving metadata - server side + +[//]: # "TODO: uncomment next line after example source added" +[//]: # "Real metadata sending and receiving examples are available [here](TODO:example_dir)." + +### Receiving metadata + +To read metadata sent by the client, the server needs to retrieve it from RPC context. +If it is a unary call, the RPC handler's context can be used. +For streaming calls, the server needs to get context from the stream. + +#### Unary call + +```go +func (s *server) SomeRPC(ctx context.Context, in *pb.someRequest) (*pb.someResponse, error) { + md, ok := metadata.FromContext(ctx) + // do something with metadata +} +``` + +#### Streaming call + +```go +func (s *server) SomeStreamingRPC(stream pb.Service_SomeStreamingRPCServer) error { + md, ok := metadata.FromContext(stream.Context()) // get context from stream + // do something with metadata +} +``` + +### Sending metadata + +#### Unary call + +To send header and trailer to client in unary call, the server can call [SendHeader](https://godoc.org/google.golang.org/grpc#SendHeader) and [SetTrailer](https://godoc.org/google.golang.org/grpc#SetTrailer) functions in module [grpc](https://godoc.org/google.golang.org/grpc). +These two functions take a context as the first parameter. 
+It should be the RPC handler's context or one derived from it: + +```go +func (s *server) SomeRPC(ctx context.Context, in *pb.someRequest) (*pb.someResponse, error) { + // create and send header + header := metadata.Pairs("header-key", "val") + grpc.SendHeader(ctx, header) + // create and set trailer + trailer := metadata.Pairs("trailer-key", "val") + grpc.SetTrailer(ctx, trailer) +} +``` + +#### Streaming call + +For streaming calls, header and trailer can be sent using function `SendHeader` and `SetTrailer` in interface [ServerStream](https://godoc.org/google.golang.org/grpc#ServerStream): + +```go +func (s *server) SomeStreamingRPC(stream pb.Service_SomeStreamingRPCServer) error { + // create and send header + header := metadata.Pairs("header-key", "val") + stream.SendHeader(header) + // create and set trailer + trailer := metadata.Pairs("trailer-key", "val") + stream.SetTrailer(trailer) +} +``` diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 5bc38be2092..03bb01f0b35 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -1,15 +1,3 @@ -.PHONY: \ - all \ - deps \ - updatedeps \ - testdeps \ - updatetestdeps \ - build \ - proto \ - test \ - testrace \ - clean \ - all: test testrace deps: @@ -32,9 +20,10 @@ proto: echo "error: protoc not installed" >&2; \ exit 1; \ fi - go get -v github.com/golang/protobuf/protoc-gen-go - for file in $$(git ls-files '*.proto'); do \ - protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \ + go get -u -v github.com/golang/protobuf/protoc-gen-go + # use $$dir as the root for all proto files in the same directory + for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \ + protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \ done test: testdeps @@ -44,7 +33,20 @@ testrace: testdeps go test -v -race -cpu 1,4 google.golang.org/grpc/... clean: - go clean google.golang.org/grpc/... + go clean -i google.golang.org/grpc/... coverage: testdeps - goveralls -v google.golang.org/grpc/... + ./coverage.sh --coveralls + +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + proto \ + test \ + testrace \ + clean \ + coverage diff --git a/vendor/google.golang.org/grpc/PATENTS b/vendor/google.golang.org/grpc/PATENTS index 619f9dbfe63..69b47959fab 100644 --- a/vendor/google.golang.org/grpc/PATENTS +++ b/vendor/google.golang.org/grpc/PATENTS @@ -1,22 +1,22 @@ Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by -Google as part of the GRPC project. +Google as part of the gRPC project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this -implementation of GRPC, where such license applies only to those patent +implementation of gRPC, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this -implementation of GRPC. This grant does not include claims that would be +implementation of gRPC. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. 
If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of GRPC or any code incorporated within this -implementation of GRPC constitutes direct or contributory patent +that this implementation of gRPC or any code incorporated within this +implementation of gRPC constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of GRPC +rights granted to you under this License for this implementation of gRPC shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 37b05f0953d..90e9453d52e 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -7,7 +7,7 @@ The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open s Installation ------------ -To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run: +To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: ``` $ go get google.golang.org/grpc @@ -16,7 +16,7 @@ $ go get google.golang.org/grpc Prerequisites ------------- -This requires Go 1.4 or above. +This requires Go 1.5 or later . Constraints ----------- diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 00000000000..52f4f10fc25 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,80 @@ +package grpc + +import ( + "math/rand" + "time" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var ( + DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, + baseDelay: 1.0 * time.Second, + factor: 1.6, + jitter: 0.2, + } +) + +// backoffStrategy defines the methodology for backing off after a grpc +// connection failure. +// +// This is unexported until the gRPC project decides whether or not to allow +// alternative backoff strategies. Once a decision is made, this type and its +// method may be exported. +type backoffStrategy interface { + // backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + backoff(retries int) time.Duration +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration + + // TODO(stevvooe): The following fields are not exported, as allowing + // changes would violate the current gRPC specification for backoff. If + // gRPC decides to allow more interesting backoff strategies, these fields + // may be opened up in the future. + + // baseDelay is the amount of time to wait before retrying after the first + // failure. + baseDelay time.Duration + + // factor is applied to the backoff after each retry. + factor float64 + + // jitter provides a range to randomize backoff delays. 
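+	// With the default jitter of 0.2, each computed delay is scaled by a
+	// random factor in roughly [0.8, 1.2).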
+ jitter float64 +} + +func setDefaults(bc *BackoffConfig) { + md := bc.MaxDelay + *bc = DefaultBackoffConfig + + if md > 0 { + bc.MaxDelay = md + } +} + +func (bc BackoffConfig) backoff(retries int) (t time.Duration) { + if retries == 0 { + return bc.baseDelay + } + backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.factor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.jitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/backoff_test.go b/vendor/google.golang.org/grpc/backoff_test.go new file mode 100644 index 00000000000..bfca7b17650 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff_test.go @@ -0,0 +1,11 @@ +package grpc + +import "testing" + +func TestBackoffConfigDefaults(t *testing.T) { + b := BackoffConfig{} + setDefaults(&b) + if b != DefaultBackoffConfig { + t.Fatalf("expected BackoffConfig to pickup default parameters: %v != %v", b, DefaultBackoffConfig) + } +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go new file mode 100644 index 00000000000..419e2146113 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer.go @@ -0,0 +1,385 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "fmt" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. 
+ Metadata interface{} +} + +// BalancerGetOptions configures a Get call. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. + Start(target string) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. + // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. + // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. + // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. + Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// downErr implements net.Error. It is constructed by gRPC internals and passed to the down +// call of Balancer. 
+type downErr struct { + timeout bool + temporary bool + desc string +} + +func (e downErr) Error() string { return e.desc } +func (e downErr) Timeout() bool { return e.timeout } +func (e downErr) Temporary() bool { return e.temporary } + +func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr { + return downErr{ + timeout: timeout, + temporary: temporary, + desc: fmt.Sprintf(format, a...), + } +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. +func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. +} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Println("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. + open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string) error { + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. + rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is only one which is connected. Notify the Get() callers who are blocking. 
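+	// cnt == 1 means addr is currently the only connected address.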
+ if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. +func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = fmt.Errorf("there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. + addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/balancer_test.go b/vendor/google.golang.org/grpc/balancer_test.go new file mode 100644 index 00000000000..d0cf0611cda --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_test.go @@ -0,0 +1,322 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "fmt" + "math" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/naming" +) + +type testWatcher struct { + // the channel to receives name resolution updates + update chan *naming.Update + // the side channel to get to know how many updates in a batch + side chan int + // the channel to notifiy update injector that the update reading is done + readDone chan int +} + +func (w *testWatcher) Next() (updates []*naming.Update, err error) { + n := <-w.side + if n == 0 { + return nil, fmt.Errorf("w.side is closed") + } + for i := 0; i < n; i++ { + u := <-w.update + if u != nil { + updates = append(updates, u) + } + } + w.readDone <- 0 + return +} + +func (w *testWatcher) Close() { +} + +// Inject naming resolution updates to the testWatcher. +func (w *testWatcher) inject(updates []*naming.Update) { + w.side <- len(updates) + for _, u := range updates { + w.update <- u + } + <-w.readDone +} + +type testNameResolver struct { + w *testWatcher + addr string +} + +func (r *testNameResolver) Resolve(target string) (naming.Watcher, error) { + r.w = &testWatcher{ + update: make(chan *naming.Update, 1), + side: make(chan int, 1), + readDone: make(chan int), + } + r.w.side <- 1 + r.w.update <- &naming.Update{ + Op: naming.Add, + Addr: r.addr, + } + go func() { + <-r.w.readDone + }() + return r.w, nil +} + +func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, *testNameResolver) { + var servers []*server + for i := 0; i < numServers; i++ { + s := newTestServer() + servers = append(servers, s) + go s.start(t, 0, maxStreams) + s.wait(t, 2*time.Second) + } + // Point to server[0] + addr := "127.0.0.1:" + servers[0].port + return servers, &testNameResolver{ + addr: addr, + } +} + +func TestNameDiscovery(t *testing.T) { + // Start 2 servers on 2 ports. + numServers := 2 + servers, r := startServers(t, numServers, math.MaxUint32) + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + req := "port" + var reply string + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port) + } + // Inject the name resolution change to remove servers[0] and add servers[1]. 
+ var updates []*naming.Update + updates = append(updates, &naming.Update{ + Op: naming.Delete, + Addr: "127.0.0.1:" + servers[0].port, + }) + updates = append(updates, &naming.Update{ + Op: naming.Add, + Addr: "127.0.0.1:" + servers[1].port, + }) + r.w.inject(updates) + // Loop until the rpcs in flight talks to servers[1]. + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(10 * time.Millisecond) + } + cc.Close() + for i := 0; i < numServers; i++ { + servers[i].stop() + } +} + +func TestEmptyAddrs(t *testing.T) { + servers, r := startServers(t, 1, math.MaxUint32) + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, reply = %q, want %q, ", err, reply, expectedResponse) + } + // Inject name resolution change to remove the server so that there is no address + // available after that. + u := &naming.Update{ + Op: naming.Delete, + Addr: "127.0.0.1:" + servers[0].port, + } + r.w.inject([]*naming.Update{u}) + // Loop until the above updates apply. + for { + time.Sleep(10 * time.Millisecond) + ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) + if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc); err != nil { + break + } + } + cc.Close() + servers[0].stop() +} + +func TestRoundRobin(t *testing.T) { + // Start 3 servers on 3 ports. + numServers := 3 + servers, r := startServers(t, numServers, math.MaxUint32) + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + // Add servers[1] to the service discovery. + u := &naming.Update{ + Op: naming.Add, + Addr: "127.0.0.1:" + servers[1].port, + } + r.w.inject([]*naming.Update{u}) + req := "port" + var reply string + // Loop until servers[1] is up + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(10 * time.Millisecond) + } + // Add server2[2] to the service discovery. + u = &naming.Update{ + Op: naming.Add, + Addr: "127.0.0.1:" + servers[2].port, + } + r.w.inject([]*naming.Update{u}) + // Loop until both servers[2] are up. + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[2].port { + break + } + time.Sleep(10 * time.Millisecond) + } + // Check the incoming RPCs served in a round-robin manner. 
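+	// Each consecutive Invoke should be answered by the next server in the
+	// rotation, identified by the port carried in the error description.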
+ for i := 0; i < 10; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[i%numServers].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", i, err, servers[i%numServers].port) + } + } + cc.Close() + for i := 0; i < numServers; i++ { + servers[i].stop() + } +} + +func TestCloseWithPendingRPC(t *testing.T) { + servers, r := startServers(t, 1, math.MaxUint32) + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port) + } + // Remove the server. + updates := []*naming.Update{&naming.Update{ + Op: naming.Delete, + Addr: "127.0.0.1:" + servers[0].port, + }} + r.w.inject(updates) + // Loop until the above update applies. + for { + ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) + if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); Code(err) == codes.DeadlineExceeded { + break + } + time.Sleep(10 * time.Millisecond) + } + // Issue 2 RPCs which should be completed with error status once cc is closed. + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err == nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) + } + }() + go func() { + defer wg.Done() + var reply string + time.Sleep(5 * time.Millisecond) + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err == nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) + } + }() + time.Sleep(5 * time.Millisecond) + cc.Close() + wg.Wait() + servers[0].stop() +} + +func TestGetOnWaitChannel(t *testing.T) { + servers, r := startServers(t, 1, math.MaxUint32) + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + // Remove all servers so that all upcoming RPCs will block on waitCh. + updates := []*naming.Update{&naming.Update{ + Op: naming.Delete, + Addr: "127.0.0.1:" + servers[0].port, + }} + r.w.inject(updates) + for { + var reply string + ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) + if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); Code(err) == codes.DeadlineExceeded { + break + } + time.Sleep(10 * time.Millisecond) + } + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) + } + }() + // Add a connected server to get the above RPC through. + updates = []*naming.Update{&naming.Update{ + Op: naming.Add, + Addr: "127.0.0.1:" + servers[0].port, + }} + r.w.inject(updates) + // Wait until the above RPC succeeds. 
+ wg.Wait() + cc.Close() + servers[0].stop() +} diff --git a/vendor/google.golang.org/grpc/benchmark/benchmark.go b/vendor/google.golang.org/grpc/benchmark/benchmark.go new file mode 100644 index 00000000000..d1143270848 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/benchmark.go @@ -0,0 +1,224 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +Package benchmark implements the building blocks to setup end-to-end gRPC benchmarks. +*/ +package benchmark + +import ( + "fmt" + "io" + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/grpclog" +) + +func newPayload(t testpb.PayloadType, size int) *testpb.Payload { + if size < 0 { + grpclog.Fatalf("Requested a response with invalid length %d", size) + } + body := make([]byte, size) + switch t { + case testpb.PayloadType_COMPRESSABLE: + case testpb.PayloadType_UNCOMPRESSABLE: + grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") + default: + grpclog.Fatalf("Unsupported payload type: %d", t) + } + return &testpb.Payload{ + Type: t, + Body: body, + } +} + +type testServer struct { +} + +func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{ + Payload: newPayload(in.ResponseType, int(in.ResponseSize)), + }, nil +} + +func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + return nil + } + if err != nil { + return err + } + if err := stream.Send(&testpb.SimpleResponse{ + Payload: newPayload(in.ResponseType, int(in.ResponseSize)), + }); err != nil { + return err + } + } +} + +// byteBufServer is a gRPC server that sends and receives byte buffer. +// The purpose is to benchmark the gRPC performance without protobuf serialization/deserialization overhead. 
+type byteBufServer struct { + respSize int32 +} + +// UnaryCall is an empty function and is not used for benchmark. +// If bytebuf UnaryCall benchmark is needed later, the function body needs to be updated. +func (s *byteBufServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil +} + +func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { + for { + var in []byte + err := stream.(grpc.ServerStream).RecvMsg(&in) + if err == io.EOF { + return nil + } + if err != nil { + return err + } + out := make([]byte, s.respSize) + if err := stream.(grpc.ServerStream).SendMsg(&out); err != nil { + return err + } + } +} + +// ServerInfo contains the information to create a gRPC benchmark server. +type ServerInfo struct { + // Addr is the address of the server. + Addr string + + // Type is the type of the server. + // It should be "protobuf" or "bytebuf". + Type string + + // Metadata is an optional configuration. + // For "protobuf", it's ignored. + // For "bytebuf", it should be an int representing response size. + Metadata interface{} +} + +// StartServer starts a gRPC server serving a benchmark service according to info. +// It returns its listen address and a function to stop the server. +func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func()) { + lis, err := net.Listen("tcp", info.Addr) + if err != nil { + grpclog.Fatalf("Failed to listen: %v", err) + } + s := grpc.NewServer(opts...) + switch info.Type { + case "protobuf": + testpb.RegisterBenchmarkServiceServer(s, &testServer{}) + case "bytebuf": + respSize, ok := info.Metadata.(int32) + if !ok { + grpclog.Fatalf("failed to StartServer, invalid metadata: %v, for Type: %v", info.Metadata, info.Type) + } + testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{respSize: respSize}) + default: + grpclog.Fatalf("failed to StartServer, unknown Type: %v", info.Type) + } + go s.Serve(lis) + return lis.Addr().String(), func() { + s.Stop() + } +} + +// DoUnaryCall performs an unary RPC with given stub and request and response sizes. +func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) error { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) + req := &testpb.SimpleRequest{ + ResponseType: pl.Type, + ResponseSize: int32(respSize), + Payload: pl, + } + if _, err := tc.UnaryCall(context.Background(), req); err != nil { + return fmt.Errorf("/BenchmarkService/UnaryCall(_, _) = _, %v, want _, ", err) + } + return nil +} + +// DoStreamingRoundTrip performs a round trip for a single streaming rpc. +func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) + req := &testpb.SimpleRequest{ + ResponseType: pl.Type, + ResponseSize: int32(respSize), + Payload: pl, + } + if err := stream.Send(req); err != nil { + return fmt.Errorf("/BenchmarkService/StreamingCall.Send(_) = %v, want ", err) + } + if _, err := stream.Recv(); err != nil { + // EOF is a valid error here. + if err == io.EOF { + return nil + } + return fmt.Errorf("/BenchmarkService/StreamingCall.Recv(_) = %v, want ", err) + } + return nil +} + +// DoByteBufStreamingRoundTrip performs a round trip for a single streaming rpc, using a custom codec for byte buffer. 
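+// The stream is assumed to use a raw byte-slice codec; respSize is not used
+// by this helper, only reqSize determines the size of the message sent.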
+func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { + out := make([]byte, reqSize) + if err := stream.(grpc.ClientStream).SendMsg(&out); err != nil { + return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).SendMsg(_) = %v, want ", err) + } + var in []byte + if err := stream.(grpc.ClientStream).RecvMsg(&in); err != nil { + // EOF is a valid error here. + if err == io.EOF { + return nil + } + return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).RecvMsg(_) = %v, want ", err) + } + return nil +} + +// NewClientConn creates a gRPC client connection to addr. +func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn { + conn, err := grpc.Dial(addr, opts...) + if err != nil { + grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) + } + return conn +} diff --git a/vendor/google.golang.org/grpc/benchmark/benchmark_test.go b/vendor/google.golang.org/grpc/benchmark/benchmark_test.go new file mode 100644 index 00000000000..8fe3fa1581a --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/benchmark_test.go @@ -0,0 +1,202 @@ +package benchmark + +import ( + "os" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/grpclog" +) + +func runUnary(b *testing.B, maxConcurrentCalls int) { + s := stats.AddStats(b, 38) + b.StopTimer() + target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}) + defer stopper() + conn := NewClientConn(target, grpc.WithInsecure()) + tc := testpb.NewBenchmarkServiceClient(conn) + + // Warm up connection. + for i := 0; i < 10; i++ { + unaryCaller(tc) + } + ch := make(chan int, maxConcurrentCalls*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(maxConcurrentCalls) + + // Distribute the b.N calls over maxConcurrentCalls workers. + for i := 0; i < maxConcurrentCalls; i++ { + go func() { + for range ch { + start := time.Now() + unaryCaller(tc) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + b.StartTimer() + for i := 0; i < b.N; i++ { + ch <- i + } + b.StopTimer() + close(ch) + wg.Wait() + conn.Close() +} + +func runStream(b *testing.B, maxConcurrentCalls int) { + s := stats.AddStats(b, 38) + b.StopTimer() + target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}) + defer stopper() + conn := NewClientConn(target, grpc.WithInsecure()) + tc := testpb.NewBenchmarkServiceClient(conn) + + // Warm up connection. + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + for i := 0; i < 10; i++ { + streamCaller(stream) + } + + ch := make(chan int, maxConcurrentCalls*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(maxConcurrentCalls) + + // Distribute the b.N calls over maxConcurrentCalls workers. 
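+	// Unlike runUnary, each worker creates and reuses its own stream; latencies
+	// are still accumulated into the shared stats under mu.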
+ for i := 0; i < maxConcurrentCalls; i++ { + go func() { + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + for range ch { + start := time.Now() + streamCaller(stream) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + b.StartTimer() + for i := 0; i < b.N; i++ { + ch <- i + } + b.StopTimer() + close(ch) + wg.Wait() + conn.Close() +} +func unaryCaller(client testpb.BenchmarkServiceClient) { + if err := DoUnaryCall(client, 1, 1); err != nil { + grpclog.Fatalf("DoUnaryCall failed: %v", err) + } +} + +func streamCaller(stream testpb.BenchmarkService_StreamingCallClient) { + if err := DoStreamingRoundTrip(stream, 1, 1); err != nil { + grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err) + } +} + +func BenchmarkClientStreamc1(b *testing.B) { + grpc.EnableTracing = true + runStream(b, 1) +} + +func BenchmarkClientStreamc8(b *testing.B) { + grpc.EnableTracing = true + runStream(b, 8) +} + +func BenchmarkClientStreamc64(b *testing.B) { + grpc.EnableTracing = true + runStream(b, 64) +} + +func BenchmarkClientStreamc512(b *testing.B) { + grpc.EnableTracing = true + runStream(b, 512) +} +func BenchmarkClientUnaryc1(b *testing.B) { + grpc.EnableTracing = true + runUnary(b, 1) +} + +func BenchmarkClientUnaryc8(b *testing.B) { + grpc.EnableTracing = true + runUnary(b, 8) +} + +func BenchmarkClientUnaryc64(b *testing.B) { + grpc.EnableTracing = true + runUnary(b, 64) +} + +func BenchmarkClientUnaryc512(b *testing.B) { + grpc.EnableTracing = true + runUnary(b, 512) +} + +func BenchmarkClientStreamNoTracec1(b *testing.B) { + grpc.EnableTracing = false + runStream(b, 1) +} + +func BenchmarkClientStreamNoTracec8(b *testing.B) { + grpc.EnableTracing = false + runStream(b, 8) +} + +func BenchmarkClientStreamNoTracec64(b *testing.B) { + grpc.EnableTracing = false + runStream(b, 64) +} + +func BenchmarkClientStreamNoTracec512(b *testing.B) { + grpc.EnableTracing = false + runStream(b, 512) +} +func BenchmarkClientUnaryNoTracec1(b *testing.B) { + grpc.EnableTracing = false + runUnary(b, 1) +} + +func BenchmarkClientUnaryNoTracec8(b *testing.B) { + grpc.EnableTracing = false + runUnary(b, 8) +} + +func BenchmarkClientUnaryNoTracec64(b *testing.B) { + grpc.EnableTracing = false + runUnary(b, 64) +} + +func BenchmarkClientUnaryNoTracec512(b *testing.B) { + grpc.EnableTracing = false + runUnary(b, 512) +} + +func TestMain(m *testing.M) { + os.Exit(stats.RunTestMain(m)) +} diff --git a/vendor/google.golang.org/grpc/benchmark/client/main.go b/vendor/google.golang.org/grpc/benchmark/client/main.go new file mode 100644 index 00000000000..5dfbe6a37f5 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/client/main.go @@ -0,0 +1,162 @@ +package main + +import ( + "flag" + "math" + "net" + "net/http" + _ "net/http/pprof" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/grpclog" +) + +var ( + server = flag.String("server", "", "The server address") + maxConcurrentRPCs = flag.Int("max_concurrent_rpcs", 1, "The max number of concurrent RPCs") + duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark client") + trace = flag.Bool("trace", true, "Whether tracing is on") + rpcType = flag.Int("rpc_type", 0, + `Configure different client rpc type. 
Valid options are: + 0 : unary call; + 1 : streaming call.`) +) + +func unaryCaller(client testpb.BenchmarkServiceClient) { + benchmark.DoUnaryCall(client, 1, 1) +} + +func streamCaller(stream testpb.BenchmarkService_StreamingCallClient) { + benchmark.DoStreamingRoundTrip(stream, 1, 1) +} + +func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.BenchmarkServiceClient) { + s = stats.NewStats(256) + conn = benchmark.NewClientConn(*server) + tc = testpb.NewBenchmarkServiceClient(conn) + return s, conn, tc +} + +func closeLoopUnary() { + s, conn, tc := buildConnection() + + for i := 0; i < 100; i++ { + unaryCaller(tc) + } + ch := make(chan int, *maxConcurrentRPCs*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(*maxConcurrentRPCs) + + for i := 0; i < *maxConcurrentRPCs; i++ { + go func() { + for _ = range ch { + start := time.Now() + unaryCaller(tc) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + // Stop the client when time is up. + done := make(chan struct{}) + go func() { + <-time.After(time.Duration(*duration) * time.Second) + close(done) + }() + ok := true + for ok { + select { + case ch <- 0: + case <-done: + ok = false + } + } + close(ch) + wg.Wait() + conn.Close() + grpclog.Println(s.String()) + +} + +func closeLoopStream() { + s, conn, tc := buildConnection() + ch := make(chan int, *maxConcurrentRPCs*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(*maxConcurrentRPCs) + // Distribute RPCs over maxConcurrentCalls workers. + for i := 0; i < *maxConcurrentRPCs; i++ { + go func() { + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + // Do some warm up. + for i := 0; i < 100; i++ { + streamCaller(stream) + } + for range ch { + start := time.Now() + streamCaller(stream) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + // Stop the client when time is up. + done := make(chan struct{}) + go func() { + <-time.After(time.Duration(*duration) * time.Second) + close(done) + }() + ok := true + for ok { + select { + case ch <- 0: + case <-done: + ok = false + } + } + close(ch) + wg.Wait() + conn.Close() + grpclog.Println(s.String()) +} + +func main() { + flag.Parse() + grpc.EnableTracing = *trace + go func() { + lis, err := net.Listen("tcp", ":0") + if err != nil { + grpclog.Fatalf("Failed to listen: %v", err) + } + grpclog.Println("Client profiling address: ", lis.Addr().String()) + if err := http.Serve(lis, nil); err != nil { + grpclog.Fatalf("Failed to serve: %v", err) + } + }() + switch *rpcType { + case 0: + closeLoopUnary() + case 1: + closeLoopStream() + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go new file mode 100644 index 00000000000..8afae814544 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go @@ -0,0 +1,977 @@ +// Code generated by protoc-gen-go. +// source: control.proto +// DO NOT EDIT! + +/* +Package grpc_testing is a generated protocol buffer package. 
+ +It is generated from these files: + control.proto + messages.proto + payloads.proto + services.proto + stats.proto + +It has these top-level messages: + PoissonParams + UniformParams + DeterministicParams + ParetoParams + ClosedLoopParams + LoadParams + SecurityParams + ClientConfig + ClientStatus + Mark + ClientArgs + ServerConfig + ServerArgs + ServerStatus + CoreRequest + CoreResponse + Void + Scenario + Scenarios + Payload + EchoStatus + SimpleRequest + SimpleResponse + StreamingInputCallRequest + StreamingInputCallResponse + ResponseParameters + StreamingOutputCallRequest + StreamingOutputCallResponse + ReconnectParams + ReconnectInfo + ByteBufferParams + SimpleProtoParams + ComplexProtoParams + PayloadConfig + ServerStats + HistogramParams + HistogramData + ClientStats +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ClientType int32 + +const ( + ClientType_SYNC_CLIENT ClientType = 0 + ClientType_ASYNC_CLIENT ClientType = 1 +) + +var ClientType_name = map[int32]string{ + 0: "SYNC_CLIENT", + 1: "ASYNC_CLIENT", +} +var ClientType_value = map[string]int32{ + "SYNC_CLIENT": 0, + "ASYNC_CLIENT": 1, +} + +func (x ClientType) String() string { + return proto.EnumName(ClientType_name, int32(x)) +} +func (ClientType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type ServerType int32 + +const ( + ServerType_SYNC_SERVER ServerType = 0 + ServerType_ASYNC_SERVER ServerType = 1 + ServerType_ASYNC_GENERIC_SERVER ServerType = 2 +) + +var ServerType_name = map[int32]string{ + 0: "SYNC_SERVER", + 1: "ASYNC_SERVER", + 2: "ASYNC_GENERIC_SERVER", +} +var ServerType_value = map[string]int32{ + "SYNC_SERVER": 0, + "ASYNC_SERVER": 1, + "ASYNC_GENERIC_SERVER": 2, +} + +func (x ServerType) String() string { + return proto.EnumName(ServerType_name, int32(x)) +} +func (ServerType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type RpcType int32 + +const ( + RpcType_UNARY RpcType = 0 + RpcType_STREAMING RpcType = 1 +) + +var RpcType_name = map[int32]string{ + 0: "UNARY", + 1: "STREAMING", +} +var RpcType_value = map[string]int32{ + "UNARY": 0, + "STREAMING": 1, +} + +func (x RpcType) String() string { + return proto.EnumName(RpcType_name, int32(x)) +} +func (RpcType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +// Parameters of poisson process distribution, which is a good representation +// of activity coming in from independent identical stationary sources. +type PoissonParams struct { + // The rate of arrivals (a.k.a. lambda parameter of the exp distribution). 
+ OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad" json:"offered_load,omitempty"` +} + +func (m *PoissonParams) Reset() { *m = PoissonParams{} } +func (m *PoissonParams) String() string { return proto.CompactTextString(m) } +func (*PoissonParams) ProtoMessage() {} +func (*PoissonParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type UniformParams struct { + InterarrivalLo float64 `protobuf:"fixed64,1,opt,name=interarrival_lo,json=interarrivalLo" json:"interarrival_lo,omitempty"` + InterarrivalHi float64 `protobuf:"fixed64,2,opt,name=interarrival_hi,json=interarrivalHi" json:"interarrival_hi,omitempty"` +} + +func (m *UniformParams) Reset() { *m = UniformParams{} } +func (m *UniformParams) String() string { return proto.CompactTextString(m) } +func (*UniformParams) ProtoMessage() {} +func (*UniformParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type DeterministicParams struct { + OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad" json:"offered_load,omitempty"` +} + +func (m *DeterministicParams) Reset() { *m = DeterministicParams{} } +func (m *DeterministicParams) String() string { return proto.CompactTextString(m) } +func (*DeterministicParams) ProtoMessage() {} +func (*DeterministicParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type ParetoParams struct { + InterarrivalBase float64 `protobuf:"fixed64,1,opt,name=interarrival_base,json=interarrivalBase" json:"interarrival_base,omitempty"` + Alpha float64 `protobuf:"fixed64,2,opt,name=alpha" json:"alpha,omitempty"` +} + +func (m *ParetoParams) Reset() { *m = ParetoParams{} } +func (m *ParetoParams) String() string { return proto.CompactTextString(m) } +func (*ParetoParams) ProtoMessage() {} +func (*ParetoParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +// Once an RPC finishes, immediately start a new one. +// No configuration parameters needed. 
+type ClosedLoopParams struct { +} + +func (m *ClosedLoopParams) Reset() { *m = ClosedLoopParams{} } +func (m *ClosedLoopParams) String() string { return proto.CompactTextString(m) } +func (*ClosedLoopParams) ProtoMessage() {} +func (*ClosedLoopParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type LoadParams struct { + // Types that are valid to be assigned to Load: + // *LoadParams_ClosedLoop + // *LoadParams_Poisson + // *LoadParams_Uniform + // *LoadParams_Determ + // *LoadParams_Pareto + Load isLoadParams_Load `protobuf_oneof:"load"` +} + +func (m *LoadParams) Reset() { *m = LoadParams{} } +func (m *LoadParams) String() string { return proto.CompactTextString(m) } +func (*LoadParams) ProtoMessage() {} +func (*LoadParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type isLoadParams_Load interface { + isLoadParams_Load() +} + +type LoadParams_ClosedLoop struct { + ClosedLoop *ClosedLoopParams `protobuf:"bytes,1,opt,name=closed_loop,json=closedLoop,oneof"` +} +type LoadParams_Poisson struct { + Poisson *PoissonParams `protobuf:"bytes,2,opt,name=poisson,oneof"` +} +type LoadParams_Uniform struct { + Uniform *UniformParams `protobuf:"bytes,3,opt,name=uniform,oneof"` +} +type LoadParams_Determ struct { + Determ *DeterministicParams `protobuf:"bytes,4,opt,name=determ,oneof"` +} +type LoadParams_Pareto struct { + Pareto *ParetoParams `protobuf:"bytes,5,opt,name=pareto,oneof"` +} + +func (*LoadParams_ClosedLoop) isLoadParams_Load() {} +func (*LoadParams_Poisson) isLoadParams_Load() {} +func (*LoadParams_Uniform) isLoadParams_Load() {} +func (*LoadParams_Determ) isLoadParams_Load() {} +func (*LoadParams_Pareto) isLoadParams_Load() {} + +func (m *LoadParams) GetLoad() isLoadParams_Load { + if m != nil { + return m.Load + } + return nil +} + +func (m *LoadParams) GetClosedLoop() *ClosedLoopParams { + if x, ok := m.GetLoad().(*LoadParams_ClosedLoop); ok { + return x.ClosedLoop + } + return nil +} + +func (m *LoadParams) GetPoisson() *PoissonParams { + if x, ok := m.GetLoad().(*LoadParams_Poisson); ok { + return x.Poisson + } + return nil +} + +func (m *LoadParams) GetUniform() *UniformParams { + if x, ok := m.GetLoad().(*LoadParams_Uniform); ok { + return x.Uniform + } + return nil +} + +func (m *LoadParams) GetDeterm() *DeterministicParams { + if x, ok := m.GetLoad().(*LoadParams_Determ); ok { + return x.Determ + } + return nil +} + +func (m *LoadParams) GetPareto() *ParetoParams { + if x, ok := m.GetLoad().(*LoadParams_Pareto); ok { + return x.Pareto + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LoadParams) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadParams_OneofMarshaler, _LoadParams_OneofUnmarshaler, _LoadParams_OneofSizer, []interface{}{ + (*LoadParams_ClosedLoop)(nil), + (*LoadParams_Poisson)(nil), + (*LoadParams_Uniform)(nil), + (*LoadParams_Determ)(nil), + (*LoadParams_Pareto)(nil), + } +} + +func _LoadParams_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadParams) + // load + switch x := m.Load.(type) { + case *LoadParams_ClosedLoop: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClosedLoop); err != nil { + return err + } + case *LoadParams_Poisson: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Poisson); err != nil { + return err + } + case *LoadParams_Uniform: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Uniform); err != nil { + return err + } + case *LoadParams_Determ: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Determ); err != nil { + return err + } + case *LoadParams_Pareto: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Pareto); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadParams.Load has unexpected type %T", x) + } + return nil +} + +func _LoadParams_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadParams) + switch tag { + case 1: // load.closed_loop + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClosedLoopParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_ClosedLoop{msg} + return true, err + case 2: // load.poisson + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PoissonParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Poisson{msg} + return true, err + case 3: // load.uniform + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UniformParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Uniform{msg} + return true, err + case 4: // load.determ + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeterministicParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Determ{msg} + return true, err + case 5: // load.pareto + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ParetoParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Pareto{msg} + return true, err + default: + return false, nil + } +} + +func _LoadParams_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadParams) + // load + switch x := m.Load.(type) { + case *LoadParams_ClosedLoop: + s := proto.Size(x.ClosedLoop) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Poisson: + s := proto.Size(x.Poisson) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Uniform: + s := proto.Size(x.Uniform) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Determ: + s := proto.Size(x.Determ) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Pareto: + s := proto.Size(x.Pareto) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// presence of SecurityParams implies use of TLS +type SecurityParams struct { + UseTestCa bool `protobuf:"varint,1,opt,name=use_test_ca,json=useTestCa" json:"use_test_ca,omitempty"` + ServerHostOverride string `protobuf:"bytes,2,opt,name=server_host_override,json=serverHostOverride" json:"server_host_override,omitempty"` +} + +func (m *SecurityParams) Reset() { *m = SecurityParams{} } +func (m *SecurityParams) String() string { return proto.CompactTextString(m) } +func (*SecurityParams) ProtoMessage() {} +func (*SecurityParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type ClientConfig struct { + // List of targets to connect to. At least one target needs to be specified. + ServerTargets []string `protobuf:"bytes,1,rep,name=server_targets,json=serverTargets" json:"server_targets,omitempty"` + ClientType ClientType `protobuf:"varint,2,opt,name=client_type,json=clientType,enum=grpc.testing.ClientType" json:"client_type,omitempty"` + SecurityParams *SecurityParams `protobuf:"bytes,3,opt,name=security_params,json=securityParams" json:"security_params,omitempty"` + // How many concurrent RPCs to start for each channel. + // For synchronous client, use a separate thread for each outstanding RPC. + OutstandingRpcsPerChannel int32 `protobuf:"varint,4,opt,name=outstanding_rpcs_per_channel,json=outstandingRpcsPerChannel" json:"outstanding_rpcs_per_channel,omitempty"` + // Number of independent client channels to create. + // i-th channel will connect to server_target[i % server_targets.size()] + ClientChannels int32 `protobuf:"varint,5,opt,name=client_channels,json=clientChannels" json:"client_channels,omitempty"` + // Only for async client. Number of threads to use to start/manage RPCs. + AsyncClientThreads int32 `protobuf:"varint,7,opt,name=async_client_threads,json=asyncClientThreads" json:"async_client_threads,omitempty"` + RpcType RpcType `protobuf:"varint,8,opt,name=rpc_type,json=rpcType,enum=grpc.testing.RpcType" json:"rpc_type,omitempty"` + // The requested load for the entire client (aggregated over all the threads). 
+ LoadParams *LoadParams `protobuf:"bytes,10,opt,name=load_params,json=loadParams" json:"load_params,omitempty"` + PayloadConfig *PayloadConfig `protobuf:"bytes,11,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"` + HistogramParams *HistogramParams `protobuf:"bytes,12,opt,name=histogram_params,json=histogramParams" json:"histogram_params,omitempty"` + // Specify the cores we should run the client on, if desired + CoreList []int32 `protobuf:"varint,13,rep,name=core_list,json=coreList" json:"core_list,omitempty"` + CoreLimit int32 `protobuf:"varint,14,opt,name=core_limit,json=coreLimit" json:"core_limit,omitempty"` +} + +func (m *ClientConfig) Reset() { *m = ClientConfig{} } +func (m *ClientConfig) String() string { return proto.CompactTextString(m) } +func (*ClientConfig) ProtoMessage() {} +func (*ClientConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ClientConfig) GetSecurityParams() *SecurityParams { + if m != nil { + return m.SecurityParams + } + return nil +} + +func (m *ClientConfig) GetLoadParams() *LoadParams { + if m != nil { + return m.LoadParams + } + return nil +} + +func (m *ClientConfig) GetPayloadConfig() *PayloadConfig { + if m != nil { + return m.PayloadConfig + } + return nil +} + +func (m *ClientConfig) GetHistogramParams() *HistogramParams { + if m != nil { + return m.HistogramParams + } + return nil +} + +type ClientStatus struct { + Stats *ClientStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` +} + +func (m *ClientStatus) Reset() { *m = ClientStatus{} } +func (m *ClientStatus) String() string { return proto.CompactTextString(m) } +func (*ClientStatus) ProtoMessage() {} +func (*ClientStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ClientStatus) GetStats() *ClientStats { + if m != nil { + return m.Stats + } + return nil +} + +// Request current stats +type Mark struct { + // if true, the stats will be reset after taking their snapshot. 
+ Reset_ bool `protobuf:"varint,1,opt,name=reset" json:"reset,omitempty"` +} + +func (m *Mark) Reset() { *m = Mark{} } +func (m *Mark) String() string { return proto.CompactTextString(m) } +func (*Mark) ProtoMessage() {} +func (*Mark) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +type ClientArgs struct { + // Types that are valid to be assigned to Argtype: + // *ClientArgs_Setup + // *ClientArgs_Mark + Argtype isClientArgs_Argtype `protobuf_oneof:"argtype"` +} + +func (m *ClientArgs) Reset() { *m = ClientArgs{} } +func (m *ClientArgs) String() string { return proto.CompactTextString(m) } +func (*ClientArgs) ProtoMessage() {} +func (*ClientArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +type isClientArgs_Argtype interface { + isClientArgs_Argtype() +} + +type ClientArgs_Setup struct { + Setup *ClientConfig `protobuf:"bytes,1,opt,name=setup,oneof"` +} +type ClientArgs_Mark struct { + Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"` +} + +func (*ClientArgs_Setup) isClientArgs_Argtype() {} +func (*ClientArgs_Mark) isClientArgs_Argtype() {} + +func (m *ClientArgs) GetArgtype() isClientArgs_Argtype { + if m != nil { + return m.Argtype + } + return nil +} + +func (m *ClientArgs) GetSetup() *ClientConfig { + if x, ok := m.GetArgtype().(*ClientArgs_Setup); ok { + return x.Setup + } + return nil +} + +func (m *ClientArgs) GetMark() *Mark { + if x, ok := m.GetArgtype().(*ClientArgs_Mark); ok { + return x.Mark + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ClientArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ClientArgs_OneofMarshaler, _ClientArgs_OneofUnmarshaler, _ClientArgs_OneofSizer, []interface{}{ + (*ClientArgs_Setup)(nil), + (*ClientArgs_Mark)(nil), + } +} + +func _ClientArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ClientArgs) + // argtype + switch x := m.Argtype.(type) { + case *ClientArgs_Setup: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Setup); err != nil { + return err + } + case *ClientArgs_Mark: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mark); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ClientArgs.Argtype has unexpected type %T", x) + } + return nil +} + +func _ClientArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ClientArgs) + switch tag { + case 1: // argtype.setup + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientConfig) + err := b.DecodeMessage(msg) + m.Argtype = &ClientArgs_Setup{msg} + return true, err + case 2: // argtype.mark + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mark) + err := b.DecodeMessage(msg) + m.Argtype = &ClientArgs_Mark{msg} + return true, err + default: + return false, nil + } +} + +func _ClientArgs_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ClientArgs) + // argtype + switch x := m.Argtype.(type) { + case *ClientArgs_Setup: + s := proto.Size(x.Setup) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ClientArgs_Mark: + s := proto.Size(x.Mark) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: 
unexpected type %T in oneof", x)) + } + return n +} + +type ServerConfig struct { + ServerType ServerType `protobuf:"varint,1,opt,name=server_type,json=serverType,enum=grpc.testing.ServerType" json:"server_type,omitempty"` + SecurityParams *SecurityParams `protobuf:"bytes,2,opt,name=security_params,json=securityParams" json:"security_params,omitempty"` + // Port on which to listen. Zero means pick unused port. + Port int32 `protobuf:"varint,4,opt,name=port" json:"port,omitempty"` + // Only for async server. Number of threads used to serve the requests. + AsyncServerThreads int32 `protobuf:"varint,7,opt,name=async_server_threads,json=asyncServerThreads" json:"async_server_threads,omitempty"` + // Specify the number of cores to limit server to, if desired + CoreLimit int32 `protobuf:"varint,8,opt,name=core_limit,json=coreLimit" json:"core_limit,omitempty"` + // payload config, used in generic server + PayloadConfig *PayloadConfig `protobuf:"bytes,9,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"` + // Specify the cores we should run the server on, if desired + CoreList []int32 `protobuf:"varint,10,rep,name=core_list,json=coreList" json:"core_list,omitempty"` +} + +func (m *ServerConfig) Reset() { *m = ServerConfig{} } +func (m *ServerConfig) String() string { return proto.CompactTextString(m) } +func (*ServerConfig) ProtoMessage() {} +func (*ServerConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *ServerConfig) GetSecurityParams() *SecurityParams { + if m != nil { + return m.SecurityParams + } + return nil +} + +func (m *ServerConfig) GetPayloadConfig() *PayloadConfig { + if m != nil { + return m.PayloadConfig + } + return nil +} + +type ServerArgs struct { + // Types that are valid to be assigned to Argtype: + // *ServerArgs_Setup + // *ServerArgs_Mark + Argtype isServerArgs_Argtype `protobuf_oneof:"argtype"` +} + +func (m *ServerArgs) Reset() { *m = ServerArgs{} } +func (m *ServerArgs) String() string { return proto.CompactTextString(m) } +func (*ServerArgs) ProtoMessage() {} +func (*ServerArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +type isServerArgs_Argtype interface { + isServerArgs_Argtype() +} + +type ServerArgs_Setup struct { + Setup *ServerConfig `protobuf:"bytes,1,opt,name=setup,oneof"` +} +type ServerArgs_Mark struct { + Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"` +} + +func (*ServerArgs_Setup) isServerArgs_Argtype() {} +func (*ServerArgs_Mark) isServerArgs_Argtype() {} + +func (m *ServerArgs) GetArgtype() isServerArgs_Argtype { + if m != nil { + return m.Argtype + } + return nil +} + +func (m *ServerArgs) GetSetup() *ServerConfig { + if x, ok := m.GetArgtype().(*ServerArgs_Setup); ok { + return x.Setup + } + return nil +} + +func (m *ServerArgs) GetMark() *Mark { + if x, ok := m.GetArgtype().(*ServerArgs_Mark); ok { + return x.Mark + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ServerArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServerArgs_OneofMarshaler, _ServerArgs_OneofUnmarshaler, _ServerArgs_OneofSizer, []interface{}{ + (*ServerArgs_Setup)(nil), + (*ServerArgs_Mark)(nil), + } +} + +func _ServerArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServerArgs) + // argtype + switch x := m.Argtype.(type) { + case *ServerArgs_Setup: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Setup); err != nil { + return err + } + case *ServerArgs_Mark: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mark); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ServerArgs.Argtype has unexpected type %T", x) + } + return nil +} + +func _ServerArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServerArgs) + switch tag { + case 1: // argtype.setup + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerConfig) + err := b.DecodeMessage(msg) + m.Argtype = &ServerArgs_Setup{msg} + return true, err + case 2: // argtype.mark + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mark) + err := b.DecodeMessage(msg) + m.Argtype = &ServerArgs_Mark{msg} + return true, err + default: + return false, nil + } +} + +func _ServerArgs_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServerArgs) + // argtype + switch x := m.Argtype.(type) { + case *ServerArgs_Setup: + s := proto.Size(x.Setup) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerArgs_Mark: + s := proto.Size(x.Mark) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ServerStatus struct { + Stats *ServerStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` + // the port bound by the server + Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + // Number of cores available to the server + Cores int32 `protobuf:"varint,3,opt,name=cores" json:"cores,omitempty"` +} + +func (m *ServerStatus) Reset() { *m = ServerStatus{} } +func (m *ServerStatus) String() string { return proto.CompactTextString(m) } +func (*ServerStatus) ProtoMessage() {} +func (*ServerStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *ServerStatus) GetStats() *ServerStats { + if m != nil { + return m.Stats + } + return nil +} + +type CoreRequest struct { +} + +func (m *CoreRequest) Reset() { *m = CoreRequest{} } +func (m *CoreRequest) String() string { return proto.CompactTextString(m) } +func (*CoreRequest) ProtoMessage() {} +func (*CoreRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +type CoreResponse struct { + // Number of cores available on the server + Cores int32 `protobuf:"varint,1,opt,name=cores" json:"cores,omitempty"` +} + +func (m *CoreResponse) Reset() { *m = CoreResponse{} } +func (m *CoreResponse) String() string { return proto.CompactTextString(m) } +func (*CoreResponse) ProtoMessage() {} +func (*CoreResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +type Void struct { +} + +func (m *Void) Reset() { *m = Void{} } +func (m *Void) String() string { 
return proto.CompactTextString(m) } +func (*Void) ProtoMessage() {} +func (*Void) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +// A single performance scenario: input to qps_json_driver +type Scenario struct { + // Human readable name for this scenario + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Client configuration + ClientConfig *ClientConfig `protobuf:"bytes,2,opt,name=client_config,json=clientConfig" json:"client_config,omitempty"` + // Number of clients to start for the test + NumClients int32 `protobuf:"varint,3,opt,name=num_clients,json=numClients" json:"num_clients,omitempty"` + // Server configuration + ServerConfig *ServerConfig `protobuf:"bytes,4,opt,name=server_config,json=serverConfig" json:"server_config,omitempty"` + // Number of servers to start for the test + NumServers int32 `protobuf:"varint,5,opt,name=num_servers,json=numServers" json:"num_servers,omitempty"` + // Warmup period, in seconds + WarmupSeconds int32 `protobuf:"varint,6,opt,name=warmup_seconds,json=warmupSeconds" json:"warmup_seconds,omitempty"` + // Benchmark time, in seconds + BenchmarkSeconds int32 `protobuf:"varint,7,opt,name=benchmark_seconds,json=benchmarkSeconds" json:"benchmark_seconds,omitempty"` + // Number of workers to spawn locally (usually zero) + SpawnLocalWorkerCount int32 `protobuf:"varint,8,opt,name=spawn_local_worker_count,json=spawnLocalWorkerCount" json:"spawn_local_worker_count,omitempty"` +} + +func (m *Scenario) Reset() { *m = Scenario{} } +func (m *Scenario) String() string { return proto.CompactTextString(m) } +func (*Scenario) ProtoMessage() {} +func (*Scenario) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *Scenario) GetClientConfig() *ClientConfig { + if m != nil { + return m.ClientConfig + } + return nil +} + +func (m *Scenario) GetServerConfig() *ServerConfig { + if m != nil { + return m.ServerConfig + } + return nil +} + +// A set of scenarios to be run with qps_json_driver +type Scenarios struct { + Scenarios []*Scenario `protobuf:"bytes,1,rep,name=scenarios" json:"scenarios,omitempty"` +} + +func (m *Scenarios) Reset() { *m = Scenarios{} } +func (m *Scenarios) String() string { return proto.CompactTextString(m) } +func (*Scenarios) ProtoMessage() {} +func (*Scenarios) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *Scenarios) GetScenarios() []*Scenario { + if m != nil { + return m.Scenarios + } + return nil +} + +func init() { + proto.RegisterType((*PoissonParams)(nil), "grpc.testing.PoissonParams") + proto.RegisterType((*UniformParams)(nil), "grpc.testing.UniformParams") + proto.RegisterType((*DeterministicParams)(nil), "grpc.testing.DeterministicParams") + proto.RegisterType((*ParetoParams)(nil), "grpc.testing.ParetoParams") + proto.RegisterType((*ClosedLoopParams)(nil), "grpc.testing.ClosedLoopParams") + proto.RegisterType((*LoadParams)(nil), "grpc.testing.LoadParams") + proto.RegisterType((*SecurityParams)(nil), "grpc.testing.SecurityParams") + proto.RegisterType((*ClientConfig)(nil), "grpc.testing.ClientConfig") + proto.RegisterType((*ClientStatus)(nil), "grpc.testing.ClientStatus") + proto.RegisterType((*Mark)(nil), "grpc.testing.Mark") + proto.RegisterType((*ClientArgs)(nil), "grpc.testing.ClientArgs") + proto.RegisterType((*ServerConfig)(nil), "grpc.testing.ServerConfig") + proto.RegisterType((*ServerArgs)(nil), "grpc.testing.ServerArgs") + proto.RegisterType((*ServerStatus)(nil), "grpc.testing.ServerStatus") + proto.RegisterType((*CoreRequest)(nil), 
"grpc.testing.CoreRequest") + proto.RegisterType((*CoreResponse)(nil), "grpc.testing.CoreResponse") + proto.RegisterType((*Void)(nil), "grpc.testing.Void") + proto.RegisterType((*Scenario)(nil), "grpc.testing.Scenario") + proto.RegisterType((*Scenarios)(nil), "grpc.testing.Scenarios") + proto.RegisterEnum("grpc.testing.ClientType", ClientType_name, ClientType_value) + proto.RegisterEnum("grpc.testing.ServerType", ServerType_name, ServerType_value) + proto.RegisterEnum("grpc.testing.RpcType", RpcType_name, RpcType_value) +} + +func init() { proto.RegisterFile("control.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1162 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0xdb, 0x46, + 0x13, 0x8d, 0x14, 0xc9, 0x96, 0x86, 0x92, 0xac, 0x6f, 0xbf, 0xa4, 0x60, 0x1c, 0x27, 0x6d, 0xd8, + 0x16, 0x0d, 0x5c, 0xc0, 0x29, 0xd4, 0x02, 0x69, 0xd1, 0x8b, 0x40, 0x56, 0x85, 0xd8, 0x80, 0xe3, + 0xba, 0x2b, 0x27, 0x45, 0xae, 0x08, 0x9a, 0x5a, 0x4b, 0x44, 0x24, 0x2e, 0xbb, 0x4b, 0xc6, 0xf0, + 0x2b, 0xf4, 0x99, 0xfa, 0x1c, 0x7d, 0x8d, 0xbe, 0x42, 0x67, 0xff, 0x64, 0x52, 0x11, 0x10, 0xb7, + 0xbd, 0xe3, 0xce, 0x9c, 0xb3, 0x3b, 0x3b, 0x67, 0x66, 0x96, 0xd0, 0x8d, 0x79, 0x9a, 0x0b, 0xbe, + 0x38, 0xc8, 0x04, 0xcf, 0x39, 0xe9, 0xcc, 0x44, 0x16, 0x1f, 0xe4, 0x4c, 0xe6, 0x49, 0x3a, 0xdb, + 0xed, 0x65, 0xd1, 0xf5, 0x82, 0x47, 0x53, 0x69, 0xbc, 0xbb, 0x9e, 0xcc, 0xa3, 0xdc, 0x2e, 0x82, + 0x01, 0x74, 0xcf, 0x78, 0x22, 0x25, 0x4f, 0xcf, 0x22, 0x11, 0x2d, 0x25, 0x79, 0x02, 0x1d, 0x7e, + 0x79, 0xc9, 0x04, 0x9b, 0x86, 0x8a, 0xe4, 0xd7, 0x3e, 0xab, 0x3d, 0xad, 0x51, 0xcf, 0xda, 0x4e, + 0xd0, 0x14, 0x44, 0xd0, 0x7d, 0x9d, 0x26, 0x97, 0x5c, 0x2c, 0x2d, 0xe7, 0x2b, 0xd8, 0x49, 0xd2, + 0x9c, 0x89, 0x48, 0x88, 0xe4, 0x7d, 0xb4, 0x40, 0xa2, 0xa5, 0xf5, 0xca, 0xe6, 0x13, 0xfe, 0x01, + 0x70, 0x9e, 0xf8, 0xf5, 0x0f, 0x81, 0x47, 0x49, 0xf0, 0x3d, 0xfc, 0xff, 0x27, 0x86, 0x96, 0x65, + 0x92, 0x26, 0x78, 0x8b, 0xf8, 0xf6, 0xc1, 0xfd, 0x02, 0x1d, 0x04, 0xb3, 0x9c, 0x5b, 0xca, 0xd7, + 0xf0, 0xbf, 0xca, 0x91, 0x17, 0x91, 0x64, 0x96, 0xd7, 0x2f, 0x3b, 0x0e, 0xd1, 0x4e, 0xee, 0x41, + 0x33, 0x5a, 0x64, 0xf3, 0xc8, 0x46, 0x65, 0x16, 0x01, 0x81, 0xfe, 0x68, 0xc1, 0xa5, 0x3a, 0x80, + 0x67, 0x66, 0xdb, 0xe0, 0x8f, 0x3a, 0x80, 0x3a, 0xcf, 0x9e, 0x32, 0x04, 0x2f, 0xd6, 0x10, 0x8c, + 0x8b, 0x67, 0x7a, 0x7f, 0x6f, 0xf0, 0xf8, 0xa0, 0xac, 0xc3, 0xc1, 0xfa, 0x1e, 0x47, 0x77, 0x28, + 0xc4, 0x2b, 0x1b, 0x79, 0x0e, 0xdb, 0x99, 0x51, 0x42, 0x9f, 0xee, 0x0d, 0x1e, 0x56, 0xe9, 0x15, + 0x99, 0x90, 0xeb, 0xd0, 0x8a, 0x58, 0x18, 0x39, 0xfc, 0xbb, 0x9b, 0x88, 0x15, 0xad, 0x14, 0xd1, + 0xa2, 0xc9, 0x8f, 0xb0, 0x35, 0xd5, 0x49, 0xf6, 0x1b, 0x9a, 0xf7, 0xa4, 0xca, 0xdb, 0x20, 0x00, + 0xb2, 0x2d, 0x85, 0x7c, 0x07, 0x5b, 0x99, 0xce, 0xb3, 0xdf, 0xd4, 0xe4, 0xdd, 0xb5, 0x68, 0x4b, + 0x1a, 0x28, 0x96, 0xc1, 0x1e, 0x6e, 0x41, 0x43, 0x09, 0x17, 0x5c, 0x40, 0x6f, 0xc2, 0xe2, 0x42, + 0x24, 0xf9, 0xb5, 0xcd, 0xe0, 0x63, 0xf0, 0x0a, 0xc9, 0x42, 0xc5, 0x0f, 0xe3, 0x48, 0x67, 0xb0, + 0x45, 0xdb, 0x68, 0x3a, 0x47, 0xcb, 0x28, 0x22, 0xdf, 0xc0, 0x3d, 0xc9, 0xc4, 0x7b, 0x26, 0xc2, + 0x39, 0x47, 0x08, 0xc7, 0x2f, 0x91, 0x4c, 0x99, 0xce, 0x55, 0x9b, 0x12, 0xe3, 0x3b, 0x42, 0xd7, + 0xcf, 0xd6, 0x13, 0xfc, 0xde, 0x84, 0xce, 0x68, 0x91, 0xb0, 0x34, 0x1f, 0xf1, 0xf4, 0x32, 0x99, + 0x91, 0x2f, 0xa1, 0x67, 0xb7, 0xc8, 0x23, 0x31, 0x63, 0xb9, 0xc4, 0x53, 0xee, 0x22, 0xb9, 0x6b, + 0xac, 0xe7, 0xc6, 0x48, 0x7e, 0x50, 0x5a, 0x2a, 0x5a, 0x98, 0x5f, 0x67, 0xe6, 0x80, 0xde, 0xc0, + 0x5f, 0xd7, 0x52, 0x01, 
0xce, 0xd1, 0xaf, 0x34, 0x74, 0xdf, 0x64, 0x0c, 0x3b, 0xd2, 0x5e, 0x2b, + 0xcc, 0xf4, 0xbd, 0xac, 0x24, 0x7b, 0x55, 0x7a, 0xf5, 0xee, 0xb4, 0x27, 0xab, 0xb9, 0x78, 0x01, + 0x7b, 0xbc, 0xc8, 0xb1, 0x4d, 0xd3, 0x29, 0xa2, 0x43, 0x64, 0xca, 0x30, 0xc3, 0xb0, 0xe3, 0x79, + 0x94, 0xa6, 0x6c, 0xa1, 0xe5, 0x6a, 0xd2, 0x07, 0x25, 0x0c, 0x45, 0xc8, 0x19, 0x13, 0x23, 0x03, + 0x50, 0x7d, 0x66, 0xaf, 0x60, 0x29, 0x52, 0xab, 0xd4, 0xa4, 0x3d, 0x63, 0xb6, 0x38, 0xa9, 0xb2, + 0x1a, 0xc9, 0xeb, 0x34, 0x0e, 0xdd, 0x8d, 0xe7, 0x82, 0xe1, 0xa4, 0xf0, 0xb7, 0x35, 0x9a, 0x68, + 0x9f, 0xbd, 0xab, 0xf1, 0x20, 0xa3, 0x85, 0xf1, 0x98, 0xd4, 0xb4, 0x74, 0x6a, 0xee, 0x57, 0xef, + 0x86, 0xa1, 0xe8, 0xbc, 0x6c, 0x0b, 0xf3, 0xa1, 0xf2, 0xa9, 0x34, 0x77, 0x09, 0x01, 0x9d, 0x90, + 0xb5, 0x7c, 0xde, 0xb4, 0x12, 0x85, 0xc5, 0x4d, 0x5b, 0x1d, 0x82, 0x1b, 0x5e, 0x61, 0xac, 0x35, + 0xf4, 0xbd, 0x8d, 0xad, 0x61, 0x30, 0x46, 0x66, 0xda, 0xcd, 0xca, 0x4b, 0x72, 0x04, 0xfd, 0x39, + 0x96, 0x30, 0x9f, 0xe1, 0x8e, 0x2e, 0x86, 0x8e, 0xde, 0xe5, 0x51, 0x75, 0x97, 0x23, 0x87, 0xb2, + 0x81, 0xec, 0xcc, 0xab, 0x06, 0xf2, 0x10, 0xda, 0x31, 0x17, 0x2c, 0x5c, 0xa0, 0xdd, 0xef, 0x62, + 0xe9, 0x34, 0x69, 0x4b, 0x19, 0x4e, 0x70, 0x4d, 0x1e, 0x01, 0x58, 0xe7, 0x32, 0xc9, 0xfd, 0x9e, + 0xce, 0x5f, 0xdb, 0x78, 0xd1, 0x10, 0xbc, 0x70, 0xb5, 0x38, 0xc1, 0xe1, 0x5b, 0x48, 0xf2, 0x0c, + 0x9a, 0x7a, 0x0c, 0xdb, 0x51, 0xf1, 0x60, 0x53, 0x79, 0x29, 0xa8, 0xa4, 0x06, 0x17, 0xec, 0x41, + 0xe3, 0x55, 0x24, 0xde, 0xa9, 0x11, 0x25, 0x98, 0x64, 0xb9, 0xed, 0x10, 0xb3, 0x08, 0x0a, 0x00, + 0xc3, 0x19, 0x8a, 0x99, 0x24, 0x03, 0xdc, 0x9c, 0xe5, 0x85, 0x9b, 0x43, 0xbb, 0x9b, 0x36, 0x37, + 0xd9, 0xc1, 0xd6, 0x34, 0x50, 0xf2, 0x14, 0x1a, 0x4b, 0xdc, 0xdf, 0xce, 0x1e, 0x52, 0xa5, 0xa8, + 0x93, 0x11, 0xaa, 0x11, 0x87, 0x6d, 0xd8, 0xc6, 0x4e, 0x51, 0x05, 0x10, 0xfc, 0x59, 0x87, 0xce, + 0x44, 0x37, 0x8f, 0x4d, 0x36, 0x6a, 0xed, 0x5a, 0x4c, 0x15, 0x48, 0x6d, 0x53, 0xef, 0x18, 0x82, + 0xe9, 0x1d, 0xb9, 0xfa, 0xde, 0xd4, 0x3b, 0xf5, 0x7f, 0xd1, 0x3b, 0x04, 0x1a, 0x19, 0x17, 0xb9, + 0xed, 0x11, 0xfd, 0x7d, 0x53, 0xe5, 0x2e, 0xb6, 0x0d, 0x55, 0x6e, 0xa3, 0xb2, 0x55, 0x5e, 0x55, + 0xb3, 0xb5, 0xa6, 0xe6, 0x86, 0xba, 0x6c, 0xff, 0xe3, 0xba, 0xac, 0x54, 0x13, 0x54, 0xab, 0x49, + 0xe9, 0x69, 0x02, 0xba, 0x85, 0x9e, 0x65, 0x01, 0xfe, 0xa3, 0x9e, 0x89, 0x93, 0xf3, 0x56, 0x55, + 0x7a, 0x03, 0x75, 0x55, 0xba, 0xca, 0x7e, 0xbd, 0x94, 0x7d, 0xac, 0x58, 0x75, 0x2f, 0x33, 0x0a, + 0x9b, 0xd4, 0x2c, 0x82, 0x2e, 0x78, 0x23, 0xfc, 0xa0, 0xec, 0xb7, 0x02, 0xb7, 0x0b, 0xbe, 0xc0, + 0xfe, 0xd0, 0x4b, 0x99, 0xf1, 0xd4, 0xbc, 0xc4, 0x86, 0x54, 0x2b, 0x93, 0xf0, 0xf9, 0x78, 0xc3, + 0x93, 0x69, 0xf0, 0x57, 0x1d, 0x5a, 0x93, 0x98, 0xa5, 0x91, 0x48, 0xb8, 0x3a, 0x33, 0x8d, 0x96, + 0xa6, 0xd8, 0xda, 0x54, 0x7f, 0xe3, 0x04, 0xed, 0xba, 0x01, 0x68, 0xf4, 0xa9, 0x7f, 0xac, 0x13, + 0x68, 0x27, 0x2e, 0xbf, 0x15, 0x9f, 0x82, 0x97, 0x16, 0x4b, 0x3b, 0x16, 0x5d, 0xe8, 0x80, 0x26, + 0xc3, 0x51, 0x33, 0xda, 0x3e, 0x1b, 0xee, 0x84, 0xc6, 0xc7, 0xb4, 0xa1, 0x1d, 0x59, 0x6e, 0x15, + 0x7b, 0x82, 0xb1, 0xb9, 0xf9, 0xac, 0x4e, 0x30, 0x1c, 0xa9, 0x9e, 0xab, 0xab, 0x48, 0x2c, 0x8b, + 0x0c, 0x31, 0x78, 0x06, 0xd6, 0xeb, 0x96, 0xc6, 0x74, 0x8d, 0x75, 0x62, 0x8c, 0xea, 0x07, 0xe7, + 0x82, 0xa5, 0xf1, 0x5c, 0x69, 0xb9, 0x42, 0x9a, 0xca, 0xee, 0xaf, 0x1c, 0x0e, 0xfc, 0x1c, 0x7c, + 0x99, 0x45, 0x57, 0x29, 0xfe, 0xa6, 0xc4, 0xf8, 0x33, 0x74, 0xc5, 0xc5, 0x3b, 0x7d, 0x83, 0x22, + 0x75, 0x55, 0x7e, 0x5f, 0xfb, 0x4f, 0x94, 0xfb, 0x57, 0xed, 0x1d, 0x29, 0x67, 0x30, 0x84, 0xb6, + 0x4b, 0xb8, 0xc4, 0xb7, 0xbf, 0x2d, 0xdd, 0x42, 
0xbf, 0xa1, 0xde, 0xe0, 0x93, 0xb5, 0x7b, 0x5b, + 0x37, 0xbd, 0x01, 0xee, 0x3f, 0x73, 0x33, 0x4a, 0xb7, 0xfb, 0x0e, 0x78, 0x93, 0xb7, 0xa7, 0xa3, + 0x70, 0x74, 0x72, 0x3c, 0x3e, 0x3d, 0xef, 0xdf, 0x21, 0x7d, 0xe8, 0x0c, 0xcb, 0x96, 0xda, 0xfe, + 0xb1, 0x6b, 0x82, 0x0a, 0x61, 0x32, 0xa6, 0x6f, 0xc6, 0xb4, 0x4c, 0xb0, 0x96, 0x1a, 0xf1, 0xe1, + 0x9e, 0xb1, 0xbc, 0x1c, 0x9f, 0x8e, 0xe9, 0xf1, 0xca, 0x53, 0xdf, 0xff, 0x1c, 0xb6, 0xed, 0xbb, + 0x44, 0xda, 0xd0, 0x7c, 0x7d, 0x3a, 0xa4, 0x6f, 0x71, 0x87, 0x2e, 0x5e, 0xea, 0x9c, 0x8e, 0x87, + 0xaf, 0x8e, 0x4f, 0x5f, 0xf6, 0x6b, 0x17, 0x5b, 0xfa, 0x97, 0xf8, 0xdb, 0xbf, 0x03, 0x00, 0x00, + 0xff, 0xff, 0x75, 0x59, 0xf4, 0x03, 0x4e, 0x0b, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto new file mode 100644 index 00000000000..e0fe0ec728f --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto @@ -0,0 +1,201 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +import "payloads.proto"; +import "stats.proto"; + +package grpc.testing; + +enum ClientType { + SYNC_CLIENT = 0; + ASYNC_CLIENT = 1; +} + +enum ServerType { + SYNC_SERVER = 0; + ASYNC_SERVER = 1; + ASYNC_GENERIC_SERVER = 2; +} + +enum RpcType { + UNARY = 0; + STREAMING = 1; +} + +// Parameters of poisson process distribution, which is a good representation +// of activity coming in from independent identical stationary sources. +message PoissonParams { + // The rate of arrivals (a.k.a. lambda parameter of the exp distribution). + double offered_load = 1; +} + +message UniformParams { + double interarrival_lo = 1; + double interarrival_hi = 2; +} + +message DeterministicParams { + double offered_load = 1; +} + +message ParetoParams { + double interarrival_base = 1; + double alpha = 2; +} + +// Once an RPC finishes, immediately start a new one. +// No configuration parameters needed. 
+message ClosedLoopParams { +} + +message LoadParams { + oneof load { + ClosedLoopParams closed_loop = 1; + PoissonParams poisson = 2; + UniformParams uniform = 3; + DeterministicParams determ = 4; + ParetoParams pareto = 5; + }; +} + +// presence of SecurityParams implies use of TLS +message SecurityParams { + bool use_test_ca = 1; + string server_host_override = 2; +} + +message ClientConfig { + // List of targets to connect to. At least one target needs to be specified. + repeated string server_targets = 1; + ClientType client_type = 2; + SecurityParams security_params = 3; + // How many concurrent RPCs to start for each channel. + // For synchronous client, use a separate thread for each outstanding RPC. + int32 outstanding_rpcs_per_channel = 4; + // Number of independent client channels to create. + // i-th channel will connect to server_target[i % server_targets.size()] + int32 client_channels = 5; + // Only for async client. Number of threads to use to start/manage RPCs. + int32 async_client_threads = 7; + RpcType rpc_type = 8; + // The requested load for the entire client (aggregated over all the threads). + LoadParams load_params = 10; + PayloadConfig payload_config = 11; + HistogramParams histogram_params = 12; + + // Specify the cores we should run the client on, if desired + repeated int32 core_list = 13; + int32 core_limit = 14; +} + +message ClientStatus { + ClientStats stats = 1; +} + +// Request current stats +message Mark { + // if true, the stats will be reset after taking their snapshot. + bool reset = 1; +} + +message ClientArgs { + oneof argtype { + ClientConfig setup = 1; + Mark mark = 2; + } +} + +message ServerConfig { + ServerType server_type = 1; + SecurityParams security_params = 2; + // Port on which to listen. Zero means pick unused port. + int32 port = 4; + // Only for async server. Number of threads used to serve the requests. 
+ int32 async_server_threads = 7; + // Specify the number of cores to limit server to, if desired + int32 core_limit = 8; + // payload config, used in generic server + PayloadConfig payload_config = 9; + + // Specify the cores we should run the server on, if desired + repeated int32 core_list = 10; +} + +message ServerArgs { + oneof argtype { + ServerConfig setup = 1; + Mark mark = 2; + } +} + +message ServerStatus { + ServerStats stats = 1; + // the port bound by the server + int32 port = 2; + // Number of cores available to the server + int32 cores = 3; +} + +message CoreRequest { +} + +message CoreResponse { + // Number of cores available on the server + int32 cores = 1; +} + +message Void { +} + +// A single performance scenario: input to qps_json_driver +message Scenario { + // Human readable name for this scenario + string name = 1; + // Client configuration + ClientConfig client_config = 2; + // Number of clients to start for the test + int32 num_clients = 3; + // Server configuration + ServerConfig server_config = 4; + // Number of servers to start for the test + int32 num_servers = 5; + // Warmup period, in seconds + int32 warmup_seconds = 6; + // Benchmark time, in seconds + int32 benchmark_seconds = 7; + // Number of workers to spawn locally (usually zero) + int32 spawn_local_worker_count = 8; +} + +// A set of scenarios to be run with qps_json_driver +message Scenarios { + repeated Scenario scenarios = 1; +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go new file mode 100644 index 00000000000..b3c8cff7542 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go @@ -0,0 +1,347 @@ +// Code generated by protoc-gen-go. +// source: messages.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The type of payload that should be returned. +type PayloadType int32 + +const ( + // Compressable text format. + PayloadType_COMPRESSABLE PayloadType = 0 + // Uncompressable binary format. + PayloadType_UNCOMPRESSABLE PayloadType = 1 + // Randomly chosen from all other formats defined in this enum. + PayloadType_RANDOM PayloadType = 2 +) + +var PayloadType_name = map[int32]string{ + 0: "COMPRESSABLE", + 1: "UNCOMPRESSABLE", + 2: "RANDOM", +} +var PayloadType_value = map[string]int32{ + "COMPRESSABLE": 0, + "UNCOMPRESSABLE": 1, + "RANDOM": 2, +} + +func (x PayloadType) String() string { + return proto.EnumName(PayloadType_name, int32(x)) +} +func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// Compression algorithms +type CompressionType int32 + +const ( + // No compression + CompressionType_NONE CompressionType = 0 + CompressionType_GZIP CompressionType = 1 + CompressionType_DEFLATE CompressionType = 2 +) + +var CompressionType_name = map[int32]string{ + 0: "NONE", + 1: "GZIP", + 2: "DEFLATE", +} +var CompressionType_value = map[string]int32{ + "NONE": 0, + "GZIP": 1, + "DEFLATE": 2, +} + +func (x CompressionType) String() string { + return proto.EnumName(CompressionType_name, int32(x)) +} +func (CompressionType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +// A block of data, to simply increase gRPC message size. +type Payload struct { + // The type of data in body. 
+ Type PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + // Primary contents of payload. + Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` +} + +func (m *Payload) Reset() { *m = Payload{} } +func (m *Payload) String() string { return proto.CompactTextString(m) } +func (*Payload) ProtoMessage() {} +func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// A protobuf representation for grpc status. This is used by test +// clients to specify a status that the server should attempt to return. +type EchoStatus struct { + Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *EchoStatus) Reset() { *m = EchoStatus{} } +func (m *EchoStatus) String() string { return proto.CompactTextString(m) } +func (*EchoStatus) ProtoMessage() {} +func (*EchoStatus) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +// Unary request. +type SimpleRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Whether SimpleResponse should include username. + FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"` + // Whether SimpleResponse should include OAuth scope. + FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"` + // Compression algorithm to be used by the server for the response (stream) + ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"` + // Whether server should return a given status + ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} +func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *SimpleRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleRequest) GetResponseStatus() *EchoStatus { + if m != nil { + return m.ResponseStatus + } + return nil +} + +// Unary response, as configured by the request. +type SimpleResponse struct { + // Payload to increase message size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + // The user the request came from, for verifying authentication was + // successful when the client expected it. + Username string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` + // OAuth scope. 
+ OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} +func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *SimpleResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Client-streaming request. +type StreamingInputCallRequest struct { + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` +} + +func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } +func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallRequest) ProtoMessage() {} +func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *StreamingInputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Client-streaming response. +type StreamingInputCallResponse struct { + // Aggregated size of payloads received from the client. + AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"` +} + +func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } +func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallResponse) ProtoMessage() {} +func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +// Configuration for a particular response. +type ResponseParameters struct { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + Size int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` + // Desired interval between consecutive responses in the response stream in + // microseconds. + IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"` +} + +func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } +func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } +func (*ResponseParameters) ProtoMessage() {} +func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +// Server-streaming request. +type StreamingOutputCallRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Configuration for each expected response message. + ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"` + // Optional input payload sent along with the request. 
+ Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Compression algorithm to be used by the server for the response (stream) + ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"` + // Whether server should return a given status + ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"` +} + +func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } +func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallRequest) ProtoMessage() {} +func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { + if m != nil { + return m.ResponseParameters + } + return nil +} + +func (m *StreamingOutputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus { + if m != nil { + return m.ResponseStatus + } + return nil +} + +// Server-streaming response, as configured by the request and parameters. +type StreamingOutputCallResponse struct { + // Payload to increase response size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` +} + +func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } +func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallResponse) ProtoMessage() {} +func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *StreamingOutputCallResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// For reconnect interop test only. +// Client tells server what reconnection parameters it used. +type ReconnectParams struct { + MaxReconnectBackoffMs int32 `protobuf:"varint,1,opt,name=max_reconnect_backoff_ms,json=maxReconnectBackoffMs" json:"max_reconnect_backoff_ms,omitempty"` +} + +func (m *ReconnectParams) Reset() { *m = ReconnectParams{} } +func (m *ReconnectParams) String() string { return proto.CompactTextString(m) } +func (*ReconnectParams) ProtoMessage() {} +func (*ReconnectParams) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +// For reconnect interop test only. +// Server tells client whether its reconnects are following the spec and the +// reconnect backoffs it saw. 
+type ReconnectInfo struct { + Passed bool `protobuf:"varint,1,opt,name=passed" json:"passed,omitempty"` + BackoffMs []int32 `protobuf:"varint,2,rep,name=backoff_ms,json=backoffMs" json:"backoff_ms,omitempty"` +} + +func (m *ReconnectInfo) Reset() { *m = ReconnectInfo{} } +func (m *ReconnectInfo) String() string { return proto.CompactTextString(m) } +func (*ReconnectInfo) ProtoMessage() {} +func (*ReconnectInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func init() { + proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") + proto.RegisterType((*EchoStatus)(nil), "grpc.testing.EchoStatus") + proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") + proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") + proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") + proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") + proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") + proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") + proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") + proto.RegisterType((*ReconnectParams)(nil), "grpc.testing.ReconnectParams") + proto.RegisterType((*ReconnectInfo)(nil), "grpc.testing.ReconnectInfo") + proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) + proto.RegisterEnum("grpc.testing.CompressionType", CompressionType_name, CompressionType_value) +} + +func init() { proto.RegisterFile("messages.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 645 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x55, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0x25, 0xdf, 0xe9, 0x24, 0x4d, 0xa3, 0x85, 0x82, 0x5b, 0x54, 0x51, 0x99, 0x4b, 0x55, 0x89, + 0x20, 0x15, 0x09, 0x24, 0x0e, 0xa0, 0xb4, 0x4d, 0x51, 0x50, 0x9b, 0x84, 0x75, 0x7b, 0xe1, 0x62, + 0x6d, 0x9c, 0x4d, 0x1a, 0x11, 0x7b, 0x8d, 0x77, 0x8d, 0x28, 0x07, 0xee, 0xfc, 0x60, 0xee, 0xec, + 0xae, 0xbd, 0x8e, 0xd3, 0xf6, 0xd0, 0xc2, 0x85, 0xdb, 0xce, 0xcc, 0x9b, 0x97, 0x79, 0x33, 0xcf, + 0x0a, 0xb4, 0x7c, 0xca, 0x39, 0x99, 0x51, 0xde, 0x09, 0x23, 0x26, 0x18, 0x6a, 0xce, 0xa2, 0xd0, + 0xeb, 0x08, 0xca, 0xc5, 0x3c, 0x98, 0xd9, 0xa7, 0x50, 0x1b, 0x91, 0xab, 0x05, 0x23, 0x13, 0xf4, + 0x02, 0xca, 0xe2, 0x2a, 0xa4, 0x56, 0x61, 0xb7, 0xb0, 0xd7, 0x3a, 0xd8, 0xea, 0xe4, 0x71, 0x9d, + 0x14, 0x74, 0x2e, 0x01, 0x58, 0xc3, 0x10, 0x82, 0xf2, 0x98, 0x4d, 0xae, 0xac, 0xa2, 0x84, 0x37, + 0xb1, 0x7e, 0xdb, 0x6f, 0x01, 0x7a, 0xde, 0x25, 0x73, 0x04, 0x11, 0x31, 0x57, 0x08, 0x8f, 0x4d, + 0x12, 0xc2, 0x0a, 0xd6, 0x6f, 0x64, 0x41, 0x2d, 0x9d, 0x47, 0x37, 0xae, 0x61, 0x13, 0xda, 0xbf, + 0x4a, 0xb0, 0xee, 0xcc, 0xfd, 0x70, 0x41, 0x31, 0xfd, 0x1a, 0xcb, 0x9f, 0x45, 0xef, 0x60, 0x3d, + 0xa2, 0x3c, 0x64, 0x01, 0xa7, 0xee, 0xdd, 0x26, 0x6b, 0x1a, 0xbc, 0x8a, 0xd0, 0xf3, 0x5c, 0x3f, + 0x9f, 0xff, 0x48, 0x7e, 0xb1, 0xb2, 0x04, 0x39, 0x32, 0x87, 0x5e, 0x42, 0x2d, 0x4c, 0x18, 0xac, + 0x92, 0x2c, 0x37, 0x0e, 0x36, 0x6f, 0xa5, 0xc7, 0x06, 0xa5, 0x58, 0xa7, 0xf3, 0xc5, 0xc2, 0x8d, + 0x39, 0x8d, 0x02, 0xe2, 0x53, 0xab, 0x2c, 0xdb, 0xea, 0xb8, 0xa9, 0x92, 0x17, 0x69, 0x0e, 0xed, + 0x41, 0x5b, 0x83, 0x18, 0x89, 0xc5, 0xa5, 0xcb, 0x3d, 0x26, 0xa7, 0xaf, 0x68, 0x5c, 0x4b, 0xe5, + 0x87, 0x2a, 0xed, 0xa8, 0x2c, 0x1a, 0xc1, 0xa3, 0x6c, 0x48, 0x8f, 0xf9, 0xa1, 0x0c, 0xf8, 0x9c, + 0x05, 0x56, 0x55, 0x6b, 0xdd, 
0x59, 0x1d, 0xe6, 0x68, 0x09, 0xd0, 0x7a, 0x1f, 0x9a, 0xd6, 0x5c, + 0x01, 0x75, 0x61, 0x63, 0x29, 0x5b, 0x5f, 0xc2, 0xaa, 0x69, 0x65, 0xd6, 0x2a, 0xd9, 0xf2, 0x52, + 0xb8, 0x95, 0xad, 0x44, 0xc7, 0xf6, 0x4f, 0x68, 0x99, 0x53, 0x24, 0xf9, 0xfc, 0x9a, 0x0a, 0x77, + 0x5a, 0xd3, 0x36, 0xd4, 0xb3, 0x0d, 0x25, 0x97, 0xce, 0x62, 0xf4, 0x0c, 0x1a, 0xf9, 0xc5, 0x94, + 0x74, 0x19, 0x58, 0xb6, 0x14, 0xe9, 0xca, 0x2d, 0x47, 0x44, 0x94, 0xf8, 0x92, 0xba, 0x1f, 0x84, + 0xb1, 0x38, 0x22, 0x8b, 0x85, 0xb1, 0xc5, 0x7d, 0x47, 0xb1, 0xcf, 0x61, 0xfb, 0x36, 0xb6, 0x54, + 0xd9, 0x6b, 0x78, 0x42, 0x66, 0xb3, 0x88, 0xce, 0x88, 0xa0, 0x13, 0x37, 0xed, 0x49, 0xfc, 0x92, + 0x18, 0x77, 0x73, 0x59, 0x4e, 0xa9, 0x95, 0x71, 0xec, 0x3e, 0x20, 0xc3, 0x31, 0x22, 0x91, 0x94, + 0x25, 0x68, 0xa4, 0x3d, 0x9f, 0x6b, 0xd5, 0x6f, 0x25, 0x77, 0x1e, 0xc8, 0xea, 0x37, 0xa2, 0x5c, + 0x93, 0xba, 0x10, 0x4c, 0xea, 0x82, 0xdb, 0xbf, 0x8b, 0xb9, 0x09, 0x87, 0xb1, 0xb8, 0x26, 0xf8, + 0x5f, 0xbf, 0x83, 0x4f, 0x90, 0xf9, 0x44, 0xea, 0x33, 0xa3, 0xca, 0x39, 0x4a, 0x72, 0x79, 0xbb, + 0xab, 0x2c, 0x37, 0x25, 0x61, 0x14, 0xdd, 0x94, 0x79, 0xef, 0xaf, 0xe6, 0xbf, 0xb4, 0xf9, 0x00, + 0x9e, 0xde, 0xba, 0xf6, 0xbf, 0xf4, 0xbc, 0xfd, 0x11, 0x36, 0x30, 0xf5, 0x58, 0x10, 0x50, 0x4f, + 0xe8, 0x65, 0x71, 0xf4, 0x06, 0x2c, 0x9f, 0x7c, 0x77, 0x23, 0x93, 0x76, 0xc7, 0xc4, 0xfb, 0xc2, + 0xa6, 0x53, 0xd7, 0xe7, 0xc6, 0x5e, 0xb2, 0x9e, 0x75, 0x1d, 0x26, 0xd5, 0x33, 0x6e, 0x9f, 0xc0, + 0x7a, 0x96, 0xed, 0x07, 0x53, 0x86, 0x1e, 0x43, 0x35, 0x24, 0x9c, 0xd3, 0x64, 0x98, 0x3a, 0x4e, + 0x23, 0xb4, 0x03, 0x90, 0xe3, 0x54, 0x47, 0xad, 0xe0, 0xb5, 0xb1, 0xe1, 0xd9, 0x7f, 0x0f, 0x8d, + 0x9c, 0x33, 0x50, 0x1b, 0x9a, 0x47, 0xc3, 0xb3, 0x11, 0xee, 0x39, 0x4e, 0xf7, 0xf0, 0xb4, 0xd7, + 0x7e, 0x20, 0x1d, 0xdb, 0xba, 0x18, 0xac, 0xe4, 0x0a, 0x08, 0xa0, 0x8a, 0xbb, 0x83, 0xe3, 0xe1, + 0x59, 0xbb, 0xb8, 0x7f, 0x00, 0x1b, 0xd7, 0xee, 0x81, 0xea, 0x50, 0x1e, 0x0c, 0x07, 0xaa, 0x59, + 0xbe, 0x3e, 0x7c, 0xee, 0x8f, 0x64, 0x4b, 0x03, 0x6a, 0xc7, 0xbd, 0x93, 0xd3, 0xee, 0x79, 0xaf, + 0x5d, 0x1c, 0x57, 0xf5, 0x5f, 0xcd, 0xab, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc2, 0x6a, 0xce, + 0x1e, 0x7c, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto new file mode 100644 index 00000000000..b1abc9e8187 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto @@ -0,0 +1,172 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Message definitions to be used by integration test service definitions. + +syntax = "proto3"; + +package grpc.testing; + +// The type of payload that should be returned. +enum PayloadType { + // Compressable text format. + COMPRESSABLE = 0; + + // Uncompressable binary format. + UNCOMPRESSABLE = 1; + + // Randomly chosen from all other formats defined in this enum. + RANDOM = 2; +} + +// Compression algorithms +enum CompressionType { + // No compression + NONE = 0; + GZIP = 1; + DEFLATE = 2; +} + +// A block of data, to simply increase gRPC message size. +message Payload { + // The type of data in body. + PayloadType type = 1; + // Primary contents of payload. + bytes body = 2; +} + +// A protobuf representation for grpc status. This is used by test +// clients to specify a status that the server should attempt to return. +message EchoStatus { + int32 code = 1; + string message = 2; +} + +// Unary request. +message SimpleRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + PayloadType response_type = 1; + + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + int32 response_size = 2; + + // Optional input payload sent along with the request. + Payload payload = 3; + + // Whether SimpleResponse should include username. + bool fill_username = 4; + + // Whether SimpleResponse should include OAuth scope. + bool fill_oauth_scope = 5; + + // Compression algorithm to be used by the server for the response (stream) + CompressionType response_compression = 6; + + // Whether server should return a given status + EchoStatus response_status = 7; +} + +// Unary response, as configured by the request. +message SimpleResponse { + // Payload to increase message size. + Payload payload = 1; + // The user the request came from, for verifying authentication was + // successful when the client expected it. + string username = 2; + // OAuth scope. + string oauth_scope = 3; +} + +// Client-streaming request. +message StreamingInputCallRequest { + // Optional input payload sent along with the request. + Payload payload = 1; + + // Not expecting any payload from the response. +} + +// Client-streaming response. +message StreamingInputCallResponse { + // Aggregated size of payloads received from the client. + int32 aggregated_payload_size = 1; +} + +// Configuration for a particular response. +message ResponseParameters { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + int32 size = 1; + + // Desired interval between consecutive responses in the response stream in + // microseconds. + int32 interval_us = 2; +} + +// Server-streaming request. +message StreamingOutputCallRequest { + // Desired payload type in the response from the server. 
+ // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + PayloadType response_type = 1; + + // Configuration for each expected response message. + repeated ResponseParameters response_parameters = 2; + + // Optional input payload sent along with the request. + Payload payload = 3; + + // Compression algorithm to be used by the server for the response (stream) + CompressionType response_compression = 6; + + // Whether server should return a given status + EchoStatus response_status = 7; +} + +// Server-streaming response, as configured by the request and parameters. +message StreamingOutputCallResponse { + // Payload to increase response size. + Payload payload = 1; +} + +// For reconnect interop test only. +// Client tells server what reconnection parameters it used. +message ReconnectParams { + int32 max_reconnect_backoff_ms = 1; +} + +// For reconnect interop test only. +// Server tells client whether its reconnects are following the spec and the +// reconnect backoffs it saw. +message ReconnectInfo { + bool passed = 1; + repeated int32 backoff_ms = 2; +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go new file mode 100644 index 00000000000..ffc357d899f --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go @@ -0,0 +1,223 @@ +// Code generated by protoc-gen-go. +// source: payloads.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ByteBufferParams struct { + ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"` + RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"` +} + +func (m *ByteBufferParams) Reset() { *m = ByteBufferParams{} } +func (m *ByteBufferParams) String() string { return proto.CompactTextString(m) } +func (*ByteBufferParams) ProtoMessage() {} +func (*ByteBufferParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +type SimpleProtoParams struct { + ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"` + RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"` +} + +func (m *SimpleProtoParams) Reset() { *m = SimpleProtoParams{} } +func (m *SimpleProtoParams) String() string { return proto.CompactTextString(m) } +func (*SimpleProtoParams) ProtoMessage() {} +func (*SimpleProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +type ComplexProtoParams struct { +} + +func (m *ComplexProtoParams) Reset() { *m = ComplexProtoParams{} } +func (m *ComplexProtoParams) String() string { return proto.CompactTextString(m) } +func (*ComplexProtoParams) ProtoMessage() {} +func (*ComplexProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +type PayloadConfig struct { + // Types that are valid to be assigned to Payload: + // *PayloadConfig_BytebufParams + // *PayloadConfig_SimpleParams + // *PayloadConfig_ComplexParams + Payload isPayloadConfig_Payload `protobuf_oneof:"payload"` +} + +func (m *PayloadConfig) Reset() { *m = PayloadConfig{} } +func (m *PayloadConfig) String() string { return 
proto.CompactTextString(m) } +func (*PayloadConfig) ProtoMessage() {} +func (*PayloadConfig) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +type isPayloadConfig_Payload interface { + isPayloadConfig_Payload() +} + +type PayloadConfig_BytebufParams struct { + BytebufParams *ByteBufferParams `protobuf:"bytes,1,opt,name=bytebuf_params,json=bytebufParams,oneof"` +} +type PayloadConfig_SimpleParams struct { + SimpleParams *SimpleProtoParams `protobuf:"bytes,2,opt,name=simple_params,json=simpleParams,oneof"` +} +type PayloadConfig_ComplexParams struct { + ComplexParams *ComplexProtoParams `protobuf:"bytes,3,opt,name=complex_params,json=complexParams,oneof"` +} + +func (*PayloadConfig_BytebufParams) isPayloadConfig_Payload() {} +func (*PayloadConfig_SimpleParams) isPayloadConfig_Payload() {} +func (*PayloadConfig_ComplexParams) isPayloadConfig_Payload() {} + +func (m *PayloadConfig) GetPayload() isPayloadConfig_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *PayloadConfig) GetBytebufParams() *ByteBufferParams { + if x, ok := m.GetPayload().(*PayloadConfig_BytebufParams); ok { + return x.BytebufParams + } + return nil +} + +func (m *PayloadConfig) GetSimpleParams() *SimpleProtoParams { + if x, ok := m.GetPayload().(*PayloadConfig_SimpleParams); ok { + return x.SimpleParams + } + return nil +} + +func (m *PayloadConfig) GetComplexParams() *ComplexProtoParams { + if x, ok := m.GetPayload().(*PayloadConfig_ComplexParams); ok { + return x.ComplexParams + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*PayloadConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PayloadConfig_OneofMarshaler, _PayloadConfig_OneofUnmarshaler, _PayloadConfig_OneofSizer, []interface{}{ + (*PayloadConfig_BytebufParams)(nil), + (*PayloadConfig_SimpleParams)(nil), + (*PayloadConfig_ComplexParams)(nil), + } +} + +func _PayloadConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PayloadConfig) + // payload + switch x := m.Payload.(type) { + case *PayloadConfig_BytebufParams: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BytebufParams); err != nil { + return err + } + case *PayloadConfig_SimpleParams: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SimpleParams); err != nil { + return err + } + case *PayloadConfig_ComplexParams: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ComplexParams); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PayloadConfig.Payload has unexpected type %T", x) + } + return nil +} + +func _PayloadConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PayloadConfig) + switch tag { + case 1: // payload.bytebuf_params + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ByteBufferParams) + err := b.DecodeMessage(msg) + m.Payload = &PayloadConfig_BytebufParams{msg} + return true, err + case 2: // payload.simple_params + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SimpleProtoParams) + err := b.DecodeMessage(msg) + m.Payload = &PayloadConfig_SimpleParams{msg} + return true, err + case 3: // payload.complex_params + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := 
new(ComplexProtoParams) + err := b.DecodeMessage(msg) + m.Payload = &PayloadConfig_ComplexParams{msg} + return true, err + default: + return false, nil + } +} + +func _PayloadConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PayloadConfig) + // payload + switch x := m.Payload.(type) { + case *PayloadConfig_BytebufParams: + s := proto.Size(x.BytebufParams) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PayloadConfig_SimpleParams: + s := proto.Size(x.SimpleParams) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PayloadConfig_ComplexParams: + s := proto.Size(x.ComplexParams) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*ByteBufferParams)(nil), "grpc.testing.ByteBufferParams") + proto.RegisterType((*SimpleProtoParams)(nil), "grpc.testing.SimpleProtoParams") + proto.RegisterType((*ComplexProtoParams)(nil), "grpc.testing.ComplexProtoParams") + proto.RegisterType((*PayloadConfig)(nil), "grpc.testing.PayloadConfig") +} + +func init() { proto.RegisterFile("payloads.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 250 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x48, 0xac, 0xcc, + 0xc9, 0x4f, 0x4c, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, + 0xd6, 0x2b, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x57, 0xf2, 0xe2, 0x12, 0x70, 0xaa, 0x2c, 0x49, + 0x75, 0x2a, 0x4d, 0x4b, 0x4b, 0x2d, 0x0a, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x16, 0x92, 0xe4, 0xe2, + 0x28, 0x4a, 0x2d, 0x8c, 0x2f, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0d, 0x62, + 0x07, 0xf2, 0x83, 0x81, 0x5c, 0x21, 0x69, 0x2e, 0xce, 0xa2, 0xd4, 0xe2, 0x02, 0x88, 0x1c, 0x13, + 0x58, 0x8e, 0x03, 0x24, 0x00, 0x92, 0x54, 0xf2, 0xe6, 0x12, 0x0c, 0xce, 0xcc, 0x2d, 0xc8, 0x49, + 0x0d, 0x00, 0x59, 0x44, 0xa1, 0x61, 0x22, 0x5c, 0x42, 0xce, 0xf9, 0x20, 0xc3, 0x2a, 0x90, 0x4c, + 0x53, 0xfa, 0xc6, 0xc8, 0xc5, 0x1b, 0x00, 0xf1, 0x8f, 0x73, 0x7e, 0x5e, 0x5a, 0x66, 0xba, 0x90, + 0x3b, 0x17, 0x5f, 0x12, 0xd0, 0x03, 0x49, 0xa5, 0x69, 0xf1, 0x05, 0x60, 0x35, 0x60, 0x5b, 0xb8, + 0x8d, 0xe4, 0xf4, 0x90, 0xfd, 0xa9, 0x87, 0xee, 0x49, 0x0f, 0x86, 0x20, 0x5e, 0xa8, 0x3e, 0xa8, + 0x43, 0xdd, 0xb8, 0x78, 0x8b, 0xc1, 0xae, 0x87, 0x99, 0xc3, 0x04, 0x36, 0x47, 0x1e, 0xd5, 0x1c, + 0x0c, 0x0f, 0x02, 0x0d, 0xe2, 0x81, 0xe8, 0x83, 0x9a, 0xe3, 0xc9, 0xc5, 0x97, 0x0c, 0x71, 0x38, + 0xcc, 0x20, 0x66, 0xb0, 0x41, 0x0a, 0xa8, 0x06, 0x61, 0x7a, 0x0e, 0xe4, 0x24, 0xa8, 0x4e, 0x88, + 0x80, 0x13, 0x27, 0x17, 0x3b, 0x34, 0xf2, 0x92, 0xd8, 0xc0, 0x91, 0x67, 0x0c, 0x08, 0x00, 0x00, + 0xff, 0xff, 0xb0, 0x8c, 0x18, 0x4e, 0xce, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto new file mode 100644 index 00000000000..056fe0c72ed --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto @@ -0,0 +1,55 @@ +// Copyright 2016, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package grpc.testing; + +message ByteBufferParams { + int32 req_size = 1; + int32 resp_size = 2; +} + +message SimpleProtoParams { + int32 req_size = 1; + int32 resp_size = 2; +} + +message ComplexProtoParams { + // TODO (vpai): Fill this in once the details of complex, representative + // protos are decided +} + +message PayloadConfig { + oneof payload { + ByteBufferParams bytebuf_params = 1; + SimpleProtoParams simple_params = 2; + ComplexProtoParams complex_params = 3; + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go new file mode 100644 index 00000000000..15d08644871 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-go. +// source: services.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for BenchmarkService service + +type BenchmarkServiceClient interface { + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // One request followed by one response. + // The server returns the client payload as-is. 
+ StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) +} + +type benchmarkServiceClient struct { + cc *grpc.ClientConn +} + +func NewBenchmarkServiceClient(cc *grpc.ClientConn) BenchmarkServiceClient { + return &benchmarkServiceClient{cc} +} + +func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := grpc.Invoke(ctx, "/grpc.testing.BenchmarkService/UnaryCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_BenchmarkService_serviceDesc.Streams[0], c.cc, "/grpc.testing.BenchmarkService/StreamingCall", opts...) + if err != nil { + return nil, err + } + x := &benchmarkServiceStreamingCallClient{stream} + return x, nil +} + +type BenchmarkService_StreamingCallClient interface { + Send(*SimpleRequest) error + Recv() (*SimpleResponse, error) + grpc.ClientStream +} + +type benchmarkServiceStreamingCallClient struct { + grpc.ClientStream +} + +func (x *benchmarkServiceStreamingCallClient) Send(m *SimpleRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *benchmarkServiceStreamingCallClient) Recv() (*SimpleResponse, error) { + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for BenchmarkService service + +type BenchmarkServiceServer interface { + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // One request followed by one response. + // The server returns the client payload as-is. 
+ StreamingCall(BenchmarkService_StreamingCallServer) error +} + +func RegisterBenchmarkServiceServer(s *grpc.Server, srv BenchmarkServiceServer) { + s.RegisterService(&_BenchmarkService_serviceDesc, srv) +} + +func _BenchmarkService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BenchmarkServiceServer).UnaryCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.BenchmarkService/UnaryCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BenchmarkServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BenchmarkService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(BenchmarkServiceServer).StreamingCall(&benchmarkServiceStreamingCallServer{stream}) +} + +type BenchmarkService_StreamingCallServer interface { + Send(*SimpleResponse) error + Recv() (*SimpleRequest, error) + grpc.ServerStream +} + +type benchmarkServiceStreamingCallServer struct { + grpc.ServerStream +} + +func (x *benchmarkServiceStreamingCallServer) Send(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *benchmarkServiceStreamingCallServer) Recv() (*SimpleRequest, error) { + m := new(SimpleRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _BenchmarkService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.BenchmarkService", + HandlerType: (*BenchmarkServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UnaryCall", + Handler: _BenchmarkService_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingCall", + Handler: _BenchmarkService_StreamingCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: fileDescriptor3, +} + +// Client API for WorkerService service + +type WorkerServiceClient interface { + // Start server with specified workload. + // First request sent specifies the ServerConfig followed by ServerStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test server + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) + // Start client with specified workload. + // First request sent specifies the ClientConfig followed by ClientStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test client + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. 
+ RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) + // Just return the core count - unary call + CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) + // Quit this worker + QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) +} + +type workerServiceClient struct { + cc *grpc.ClientConn +} + +func NewWorkerServiceClient(cc *grpc.ClientConn) WorkerServiceClient { + return &workerServiceClient{cc} +} + +func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) { + stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[0], c.cc, "/grpc.testing.WorkerService/RunServer", opts...) + if err != nil { + return nil, err + } + x := &workerServiceRunServerClient{stream} + return x, nil +} + +type WorkerService_RunServerClient interface { + Send(*ServerArgs) error + Recv() (*ServerStatus, error) + grpc.ClientStream +} + +type workerServiceRunServerClient struct { + grpc.ClientStream +} + +func (x *workerServiceRunServerClient) Send(m *ServerArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerServiceRunServerClient) Recv() (*ServerStatus, error) { + m := new(ServerStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) { + stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[1], c.cc, "/grpc.testing.WorkerService/RunClient", opts...) + if err != nil { + return nil, err + } + x := &workerServiceRunClientClient{stream} + return x, nil +} + +type WorkerService_RunClientClient interface { + Send(*ClientArgs) error + Recv() (*ClientStatus, error) + grpc.ClientStream +} + +type workerServiceRunClientClient struct { + grpc.ClientStream +} + +func (x *workerServiceRunClientClient) Send(m *ClientArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerServiceRunClientClient) Recv() (*ClientStatus, error) { + m := new(ClientStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) { + out := new(CoreResponse) + err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/CoreCount", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) { + out := new(Void) + err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/QuitWorker", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for WorkerService service + +type WorkerServiceServer interface { + // Start server with specified workload. + // First request sent specifies the ServerConfig followed by ServerStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test server + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + RunServer(WorkerService_RunServerServer) error + // Start client with specified workload. + // First request sent specifies the ClientConfig followed by ClientStatus + // response. 
After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test client + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + RunClient(WorkerService_RunClientServer) error + // Just return the core count - unary call + CoreCount(context.Context, *CoreRequest) (*CoreResponse, error) + // Quit this worker + QuitWorker(context.Context, *Void) (*Void, error) +} + +func RegisterWorkerServiceServer(s *grpc.Server, srv WorkerServiceServer) { + s.RegisterService(&_WorkerService_serviceDesc, srv) +} + +func _WorkerService_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServiceServer).RunServer(&workerServiceRunServerServer{stream}) +} + +type WorkerService_RunServerServer interface { + Send(*ServerStatus) error + Recv() (*ServerArgs, error) + grpc.ServerStream +} + +type workerServiceRunServerServer struct { + grpc.ServerStream +} + +func (x *workerServiceRunServerServer) Send(m *ServerStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerServiceRunServerServer) Recv() (*ServerArgs, error) { + m := new(ServerArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _WorkerService_RunClient_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServiceServer).RunClient(&workerServiceRunClientServer{stream}) +} + +type WorkerService_RunClientServer interface { + Send(*ClientStatus) error + Recv() (*ClientArgs, error) + grpc.ServerStream +} + +type workerServiceRunClientServer struct { + grpc.ServerStream +} + +func (x *workerServiceRunClientServer) Send(m *ClientStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerServiceRunClientServer) Recv() (*ClientArgs, error) { + m := new(ClientArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _WorkerService_CoreCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CoreRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkerServiceServer).CoreCount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.WorkerService/CoreCount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkerServiceServer).CoreCount(ctx, req.(*CoreRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkerService_QuitWorker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Void) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkerServiceServer).QuitWorker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.WorkerService/QuitWorker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkerServiceServer).QuitWorker(ctx, req.(*Void)) + } + return interceptor(ctx, in, info, handler) +} + +var _WorkerService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.WorkerService", + HandlerType: (*WorkerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CoreCount", + Handler: _WorkerService_CoreCount_Handler, + }, + { + MethodName: "QuitWorker", + Handler: 
_WorkerService_QuitWorker_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "RunServer", + Handler: _WorkerService_RunServer_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "RunClient", + Handler: _WorkerService_RunClient_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: fileDescriptor3, +} + +func init() { proto.RegisterFile("services.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 254 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x91, 0xc1, 0x4a, 0xc4, 0x30, + 0x10, 0x86, 0xa9, 0x07, 0xa1, 0xc1, 0x2e, 0x92, 0x93, 0x46, 0x1f, 0xc0, 0x53, 0x91, 0xd5, 0x17, + 0x70, 0x8b, 0x1e, 0x05, 0xb7, 0xa8, 0xe7, 0x58, 0x87, 0x1a, 0x36, 0x4d, 0xea, 0xcc, 0x44, 0xf0, + 0x49, 0x7c, 0x07, 0x9f, 0xd2, 0xee, 0x66, 0x0b, 0xb5, 0xe4, 0xb6, 0xc7, 0xf9, 0xbf, 0xe1, 0x23, + 0x7f, 0x46, 0x2c, 0x08, 0xf0, 0xcb, 0x34, 0x40, 0x65, 0x8f, 0x9e, 0xbd, 0x3c, 0x69, 0xb1, 0x6f, + 0x4a, 0x06, 0x62, 0xe3, 0x5a, 0xb5, 0xe8, 0x80, 0x48, 0xb7, 0x23, 0x55, 0x45, 0xe3, 0x1d, 0xa3, + 0xb7, 0x71, 0x5c, 0xfe, 0x66, 0xe2, 0x74, 0x05, 0xae, 0xf9, 0xe8, 0x34, 0x6e, 0xea, 0x28, 0x92, + 0x0f, 0x22, 0x7f, 0x76, 0x1a, 0xbf, 0x2b, 0x6d, 0xad, 0xbc, 0x28, 0xa7, 0xbe, 0xb2, 0x36, 0x5d, + 0x6f, 0x61, 0x0d, 0x9f, 0x61, 0x08, 0xd4, 0x65, 0x1a, 0x52, 0xef, 0x1d, 0x81, 0x7c, 0x14, 0x45, + 0xcd, 0x08, 0xba, 0x1b, 0xd8, 0x81, 0xae, 0xab, 0xec, 0x3a, 0x5b, 0xfe, 0x1c, 0x89, 0xe2, 0xd5, + 0xe3, 0x06, 0x70, 0x7c, 0xe9, 0xbd, 0xc8, 0xd7, 0xc1, 0x6d, 0x27, 0x40, 0x79, 0x36, 0x13, 0xec, + 0xd2, 0x3b, 0x6c, 0x49, 0xa9, 0x14, 0xa9, 0x59, 0x73, 0xa0, 0xad, 0x78, 0xaf, 0xa9, 0xac, 0x01, + 0xc7, 0x73, 0x4d, 0x4c, 0x53, 0x9a, 0x48, 0x26, 0x9a, 0x95, 0xc8, 0x2b, 0x8f, 0x50, 0xf9, 0x30, + 0x68, 0xce, 0x67, 0xcb, 0x03, 0x18, 0x9b, 0xaa, 0x14, 0xda, 0xff, 0xd9, 0xad, 0x10, 0x4f, 0xc1, + 0x70, 0xac, 0x29, 0xe5, 0xff, 0xcd, 0x17, 0x6f, 0xde, 0x55, 0x22, 0x7b, 0x3b, 0xde, 0x5d, 0xf3, + 0xe6, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x84, 0x02, 0xe3, 0x0c, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto new file mode 100644 index 00000000000..c2acca7f1a3 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto @@ -0,0 +1,71 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. +syntax = "proto3"; + +import "messages.proto"; +import "control.proto"; + +package grpc.testing; + +service BenchmarkService { + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // One request followed by one response. + // The server returns the client payload as-is. + rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse); +} + +service WorkerService { + // Start server with specified workload. + // First request sent specifies the ServerConfig followed by ServerStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test server + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + rpc RunServer(stream ServerArgs) returns (stream ServerStatus); + + // Start client with specified workload. + // First request sent specifies the ClientConfig followed by ClientStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test client + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + rpc RunClient(stream ClientArgs) returns (stream ClientStatus); + + // Just return the core count - unary call + rpc CoreCount(CoreRequest) returns (CoreResponse); + + // Quit this worker + rpc QuitWorker(Void) returns (Void); +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go new file mode 100644 index 00000000000..45b9b4af690 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go @@ -0,0 +1,111 @@ +// Code generated by protoc-gen-go. +// source: stats.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ServerStats struct { + // wall clock time change in seconds since last reset + TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"` + // change in user time (in seconds) used by the server since last reset + TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user,json=timeUser" json:"time_user,omitempty"` + // change in server time (in seconds) used by the server process and all + // threads since last reset + TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"` +} + +func (m *ServerStats) Reset() { *m = ServerStats{} } +func (m *ServerStats) String() string { return proto.CompactTextString(m) } +func (*ServerStats) ProtoMessage() {} +func (*ServerStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +// Histogram params based on grpc/support/histogram.c +type HistogramParams struct { + Resolution float64 `protobuf:"fixed64,1,opt,name=resolution" json:"resolution,omitempty"` + MaxPossible float64 `protobuf:"fixed64,2,opt,name=max_possible,json=maxPossible" json:"max_possible,omitempty"` +} + +func (m *HistogramParams) Reset() { *m = HistogramParams{} } +func (m *HistogramParams) String() string { return proto.CompactTextString(m) } +func (*HistogramParams) ProtoMessage() {} +func (*HistogramParams) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } + +// Histogram data based on grpc/support/histogram.c +type HistogramData struct { + Bucket []uint32 `protobuf:"varint,1,rep,name=bucket" json:"bucket,omitempty"` + MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen,json=minSeen" json:"min_seen,omitempty"` + MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen,json=maxSeen" json:"max_seen,omitempty"` + Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"` + SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares,json=sumOfSquares" json:"sum_of_squares,omitempty"` + Count float64 `protobuf:"fixed64,6,opt,name=count" json:"count,omitempty"` +} + +func (m *HistogramData) Reset() { *m = HistogramData{} } +func (m *HistogramData) String() string { return proto.CompactTextString(m) } +func (*HistogramData) ProtoMessage() {} +func (*HistogramData) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } + +type ClientStats struct { + // Latency histogram. Data points are in nanoseconds. + Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"` + // See ServerStats for details. 
+ TimeElapsed float64 `protobuf:"fixed64,2,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"` + TimeUser float64 `protobuf:"fixed64,3,opt,name=time_user,json=timeUser" json:"time_user,omitempty"` + TimeSystem float64 `protobuf:"fixed64,4,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} +func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } + +func (m *ClientStats) GetLatencies() *HistogramData { + if m != nil { + return m.Latencies + } + return nil +} + +func init() { + proto.RegisterType((*ServerStats)(nil), "grpc.testing.ServerStats") + proto.RegisterType((*HistogramParams)(nil), "grpc.testing.HistogramParams") + proto.RegisterType((*HistogramData)(nil), "grpc.testing.HistogramData") + proto.RegisterType((*ClientStats)(nil), "grpc.testing.ClientStats") +} + +func init() { proto.RegisterFile("stats.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 342 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x92, 0x4f, 0x4f, 0xe3, 0x30, + 0x10, 0xc5, 0x95, 0xa6, 0xed, 0xb6, 0x93, 0x76, 0x77, 0x65, 0xad, 0x56, 0x41, 0x95, 0xf8, 0x13, + 0x71, 0xe8, 0x29, 0x07, 0x38, 0x71, 0x06, 0x24, 0x6e, 0x54, 0x0d, 0x9c, 0x23, 0x37, 0x4c, 0x2b, + 0x8b, 0xc4, 0x0e, 0x99, 0x09, 0x2a, 0x1f, 0x09, 0xf1, 0x25, 0x71, 0x9c, 0x08, 0x0a, 0x48, 0x70, + 0x49, 0xf2, 0x7e, 0x6f, 0x34, 0xe3, 0xc9, 0x33, 0x04, 0xc4, 0x92, 0x29, 0x2e, 0x2b, 0xc3, 0x46, + 0x4c, 0x36, 0x55, 0x99, 0xc5, 0x8c, 0xc4, 0x4a, 0x6f, 0x22, 0x0d, 0x41, 0x82, 0xd5, 0x23, 0x56, + 0x49, 0x53, 0x22, 0x8e, 0x60, 0xc2, 0xaa, 0xc0, 0x14, 0x73, 0x59, 0x12, 0xde, 0x85, 0xde, 0xa1, + 0x37, 0xf7, 0x96, 0x41, 0xc3, 0x2e, 0x5b, 0x24, 0x66, 0x30, 0x76, 0x25, 0x35, 0x61, 0x15, 0xf6, + 0x9c, 0x3f, 0x6a, 0xc0, 0xad, 0xd5, 0xe2, 0x00, 0x5c, 0x6d, 0x4a, 0x4f, 0xc4, 0x58, 0x84, 0xbe, + 0xb3, 0xa1, 0x41, 0x89, 0x23, 0xd1, 0x0d, 0xfc, 0xb9, 0x52, 0xc4, 0x66, 0x53, 0xc9, 0x62, 0x21, + 0xed, 0x83, 0xc4, 0x3e, 0x40, 0x85, 0x64, 0xf2, 0x9a, 0x95, 0xd1, 0xdd, 0xc4, 0x1d, 0xd2, 0x9c, + 0xa9, 0x90, 0xdb, 0xb4, 0x34, 0x44, 0x6a, 0x95, 0x63, 0x37, 0x33, 0xb0, 0x6c, 0xd1, 0xa1, 0xe8, + 0xc5, 0x83, 0xe9, 0x5b, 0xdb, 0x0b, 0xc9, 0x52, 0xfc, 0x87, 0xe1, 0xaa, 0xce, 0xee, 0x91, 0x6d, + 0x43, 0x7f, 0x3e, 0x5d, 0x76, 0x4a, 0xec, 0xc1, 0xa8, 0x50, 0x3a, 0x25, 0x44, 0xdd, 0x35, 0xfa, + 0x65, 0x75, 0x62, 0xa5, 0xb3, 0xec, 0x1c, 0x67, 0xf9, 0x9d, 0x25, 0xb7, 0xce, 0xfa, 0x0b, 0x3e, + 0xd5, 0x45, 0xd8, 0x77, 0xb4, 0xf9, 0x14, 0xc7, 0xf0, 0xdb, 0xbe, 0x52, 0xb3, 0x4e, 0xe9, 0xa1, + 0x96, 0xf6, 0xb4, 0xe1, 0xc0, 0x99, 0x13, 0x4b, 0xaf, 0xd7, 0x49, 0xcb, 0xc4, 0x3f, 0x18, 0x64, + 0xa6, 0xd6, 0x1c, 0x0e, 0x9d, 0xd9, 0x8a, 0xe8, 0xd9, 0x83, 0xe0, 0x3c, 0x57, 0xa8, 0xb9, 0xfd, + 0xe9, 0x67, 0x30, 0xce, 0x25, 0xa3, 0xce, 0x94, 0x6d, 0xd3, 0xec, 0x1f, 0x9c, 0xcc, 0xe2, 0xdd, + 0x94, 0xe2, 0x0f, 0xbb, 0x2d, 0xdf, 0xab, 0xbf, 0xe4, 0xd5, 0xfb, 0x21, 0x2f, 0xff, 0xfb, 0xbc, + 0xfa, 0x9f, 0xf3, 0x5a, 0x0d, 0xdd, 0xa5, 0x39, 0x7d, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xea, 0x75, + 0x34, 0x90, 0x43, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto new file mode 100644 index 00000000000..9bc3cb216ba --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto @@ -0,0 
+1,70 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package grpc.testing; + +message ServerStats { + // wall clock time change in seconds since last reset + double time_elapsed = 1; + + // change in user time (in seconds) used by the server since last reset + double time_user = 2; + + // change in server time (in seconds) used by the server process and all + // threads since last reset + double time_system = 3; +} + +// Histogram params based on grpc/support/histogram.c +message HistogramParams { + double resolution = 1; // first bucket is [0, 1 + resolution) + double max_possible = 2; // use enough buckets to allow this value +} + +// Histogram data based on grpc/support/histogram.c +message HistogramData { + repeated uint32 bucket = 1; + double min_seen = 2; + double max_seen = 3; + double sum = 4; + double sum_of_squares = 5; + double count = 6; +} + +message ClientStats { + // Latency histogram. Data points are in nanoseconds. + HistogramData latencies = 1; + + // See ServerStats for details. 
+ double time_elapsed = 2; + double time_user = 3; + double time_system = 4; +} diff --git a/vendor/google.golang.org/grpc/benchmark/server/main.go b/vendor/google.golang.org/grpc/benchmark/server/main.go new file mode 100644 index 00000000000..d43aad0bac0 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/server/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "flag" + "math" + "net" + "net/http" + _ "net/http/pprof" + "time" + + "google.golang.org/grpc/benchmark" + "google.golang.org/grpc/grpclog" +) + +var ( + duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark server") +) + +func main() { + flag.Parse() + go func() { + lis, err := net.Listen("tcp", ":0") + if err != nil { + grpclog.Fatalf("Failed to listen: %v", err) + } + grpclog.Println("Server profiling address: ", lis.Addr().String()) + if err := http.Serve(lis, nil); err != nil { + grpclog.Fatalf("Failed to serve: %v", err) + } + }() + addr, stopper := benchmark.StartServer(benchmark.ServerInfo{Addr: ":0", Type: "protobuf"}) // listen on all interfaces + grpclog.Println("Server Address: ", addr) + <-time.After(time.Duration(*duration) * time.Second) + stopper() +} diff --git a/vendor/google.golang.org/grpc/benchmark/server/testdata/ca.pem b/vendor/google.golang.org/grpc/benchmark/server/testdata/ca.pem new file mode 100644 index 00000000000..6c8511a73c6 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/server/testdata/ca.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla +Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 +YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT +BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 ++L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu +g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd +Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau +sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m +oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG +Dfcog5wrJytaQ6UA0wE= +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.key b/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.key new file mode 100644 index 00000000000..143a5b87658 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD +M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf +3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY +AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm +V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY +tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p +dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q +K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR +81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff +DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd +aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 +ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 +XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe 
+F98XJ7tIFfJq +-----END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.pem b/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.pem new file mode 100644 index 00000000000..f3d43fcc5be --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx +MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 +ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco +LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg +zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd +9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw +CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy +em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G +CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 +hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh +y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/benchmark/stats/histogram.go b/vendor/google.golang.org/grpc/benchmark/stats/histogram.go new file mode 100644 index 00000000000..099bcd65564 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/stats/histogram.go @@ -0,0 +1,198 @@ +package stats + +import ( + "bytes" + "fmt" + "io" + "log" + "math" + "strconv" + "strings" +) + +// Histogram accumulates values in the form of a histogram with +// exponentially increased bucket sizes. +type Histogram struct { + // Count is the total number of values added to the histogram. + Count int64 + // Sum is the sum of all the values added to the histogram. + Sum int64 + // SumOfSquares is the sum of squares of all values. + SumOfSquares int64 + // Min is the minimum of all the values added to the histogram. + Min int64 + // Max is the maximum of all the values added to the histogram. + Max int64 + // Buckets contains all the buckets of the histogram. + Buckets []HistogramBucket + + opts HistogramOptions + logBaseBucketSize float64 + oneOverLogOnePlusGrowthFactor float64 +} + +// HistogramOptions contains the parameters that define the histogram's buckets. +// The first bucket of the created histogram (with index 0) contains [min, min+n) +// where n = BaseBucketSize, min = MinValue. +// Bucket i (i>=1) contains [min + n * m^(i-1), min + n * m^i), where m = 1+GrowthFactor. +// The type of the values is int64. +type HistogramOptions struct { + // NumBuckets is the number of buckets. + NumBuckets int + // GrowthFactor is the growth factor of the buckets. A value of 0.1 + // indicates that bucket N+1 will be 10% larger than bucket N. + GrowthFactor float64 + // BaseBucketSize is the size of the first bucket. + BaseBucketSize float64 + // MinValue is the lower bound of the first bucket. + MinValue int64 +} + +// HistogramBucket represents one histogram bucket. +type HistogramBucket struct { + // LowBound is the lower bound of the bucket. + LowBound float64 + // Count is the number of values in the bucket. + Count int64 +} + +// NewHistogram returns a pointer to a new Histogram object that was created +// with the provided options. 
+func NewHistogram(opts HistogramOptions) *Histogram { + if opts.NumBuckets == 0 { + opts.NumBuckets = 32 + } + if opts.BaseBucketSize == 0.0 { + opts.BaseBucketSize = 1.0 + } + h := Histogram{ + Buckets: make([]HistogramBucket, opts.NumBuckets), + Min: math.MaxInt64, + Max: math.MinInt64, + + opts: opts, + logBaseBucketSize: math.Log(opts.BaseBucketSize), + oneOverLogOnePlusGrowthFactor: 1 / math.Log(1+opts.GrowthFactor), + } + m := 1.0 + opts.GrowthFactor + delta := opts.BaseBucketSize + h.Buckets[0].LowBound = float64(opts.MinValue) + for i := 1; i < opts.NumBuckets; i++ { + h.Buckets[i].LowBound = float64(opts.MinValue) + delta + delta = delta * m + } + return &h +} + +// Print writes textual output of the histogram values. +func (h *Histogram) Print(w io.Writer) { + avg := float64(h.Sum) / float64(h.Count) + fmt.Fprintf(w, "Count: %d Min: %d Max: %d Avg: %.2f\n", h.Count, h.Min, h.Max, avg) + fmt.Fprintf(w, "%s\n", strings.Repeat("-", 60)) + if h.Count <= 0 { + return + } + + maxBucketDigitLen := len(strconv.FormatFloat(h.Buckets[len(h.Buckets)-1].LowBound, 'f', 6, 64)) + if maxBucketDigitLen < 3 { + // For "inf". + maxBucketDigitLen = 3 + } + maxCountDigitLen := len(strconv.FormatInt(h.Count, 10)) + percentMulti := 100 / float64(h.Count) + + accCount := int64(0) + for i, b := range h.Buckets { + fmt.Fprintf(w, "[%*f, ", maxBucketDigitLen, b.LowBound) + if i+1 < len(h.Buckets) { + fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound) + } else { + fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf") + } + + accCount += b.Count + fmt.Fprintf(w, " %*d %5.1f%% %5.1f%%", maxCountDigitLen, b.Count, float64(b.Count)*percentMulti, float64(accCount)*percentMulti) + + const barScale = 0.1 + barLength := int(float64(b.Count)*percentMulti*barScale + 0.5) + fmt.Fprintf(w, " %s\n", strings.Repeat("#", barLength)) + } +} + +// String returns the textual output of the histogram values as string. +func (h *Histogram) String() string { + var b bytes.Buffer + h.Print(&b) + return b.String() +} + +// Clear resets all the content of histogram. +func (h *Histogram) Clear() { + h.Count = 0 + h.Sum = 0 + h.SumOfSquares = 0 + h.Min = math.MaxInt64 + h.Max = math.MinInt64 + for _, v := range h.Buckets { + v.Count = 0 + } +} + +// Opts returns a copy of the options used to create the Histogram. +func (h *Histogram) Opts() HistogramOptions { + return h.opts +} + +// Add adds a value to the histogram. +func (h *Histogram) Add(value int64) error { + bucket, err := h.findBucket(value) + if err != nil { + return err + } + h.Buckets[bucket].Count++ + h.Count++ + h.Sum += value + h.SumOfSquares += value * value + if value < h.Min { + h.Min = value + } + if value > h.Max { + h.Max = value + } + return nil +} + +func (h *Histogram) findBucket(value int64) (int, error) { + delta := float64(value - h.opts.MinValue) + var b int + if delta >= h.opts.BaseBucketSize { + // b = log_{1+growthFactor} (delta / baseBucketSize) + 1 + // = log(delta / baseBucketSize) / log(1+growthFactor) + 1 + // = (log(delta) - log(baseBucketSize)) * (1 / log(1+growthFactor)) + 1 + b = int((math.Log(delta)-h.logBaseBucketSize)*h.oneOverLogOnePlusGrowthFactor + 1) + } + if b >= len(h.Buckets) { + return 0, fmt.Errorf("no bucket for value: %d", value) + } + return b, nil +} + +// Merge takes another histogram h2, and merges its content into h. +// The two histograms must be created by equivalent HistogramOptions. 
+func (h *Histogram) Merge(h2 *Histogram) { + if h.opts != h2.opts { + log.Fatalf("failed to merge histograms, created by inequivalent options") + } + h.Count += h2.Count + h.Sum += h2.Sum + h.SumOfSquares += h2.SumOfSquares + if h2.Min < h.Min { + h.Min = h2.Min + } + if h2.Max > h.Max { + h.Max = h2.Max + } + for i, b := range h2.Buckets { + h.Buckets[i].Count += b.Count + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/stats/stats.go b/vendor/google.golang.org/grpc/benchmark/stats/stats.go new file mode 100644 index 00000000000..e0edb174072 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/stats/stats.go @@ -0,0 +1,116 @@ +package stats + +import ( + "bytes" + "fmt" + "io" + "math" + "time" +) + +// Stats is a simple helper for gathering additional statistics like histogram +// during benchmarks. This is not thread safe. +type Stats struct { + numBuckets int + unit time.Duration + min, max int64 + histogram *Histogram + + durations durationSlice + dirty bool +} + +type durationSlice []time.Duration + +// NewStats creates a new Stats instance. If numBuckets is not positive, +// the default value (16) will be used. +func NewStats(numBuckets int) *Stats { + if numBuckets <= 0 { + numBuckets = 16 + } + return &Stats{ + // Use one more bucket for the last unbounded bucket. + numBuckets: numBuckets + 1, + durations: make(durationSlice, 0, 100000), + } +} + +// Add adds an elapsed time per operation to the stats. +func (stats *Stats) Add(d time.Duration) { + stats.durations = append(stats.durations, d) + stats.dirty = true +} + +// Clear resets the stats, removing all values. +func (stats *Stats) Clear() { + stats.durations = stats.durations[:0] + stats.histogram = nil + stats.dirty = false +} + +// maybeUpdate updates internal stat data if there was any newly added +// stats since this was updated. +func (stats *Stats) maybeUpdate() { + if !stats.dirty { + return + } + + stats.min = math.MaxInt64 + stats.max = 0 + for _, d := range stats.durations { + if stats.min > int64(d) { + stats.min = int64(d) + } + if stats.max < int64(d) { + stats.max = int64(d) + } + } + + // Use the largest unit that can represent the minimum time duration. + stats.unit = time.Nanosecond + for _, u := range []time.Duration{time.Microsecond, time.Millisecond, time.Second} { + if stats.min <= int64(u) { + break + } + stats.unit = u + } + + // Adjust the min/max according to the new unit. + stats.min /= int64(stats.unit) + stats.max /= int64(stats.unit) + numBuckets := stats.numBuckets + if n := int(stats.max - stats.min + 1); n < numBuckets { + numBuckets = n + } + stats.histogram = NewHistogram(HistogramOptions{ + NumBuckets: numBuckets, + // max-min(lower bound of last bucket) = (1 + growthFactor)^(numBuckets-2) * baseBucketSize. + GrowthFactor: math.Pow(float64(stats.max-stats.min), 1/float64(numBuckets-2)) - 1, + BaseBucketSize: 1.0, + MinValue: stats.min}) + + for _, d := range stats.durations { + stats.histogram.Add(int64(d / stats.unit)) + } + + stats.dirty = false +} + +// Print writes textual output of the Stats. +func (stats *Stats) Print(w io.Writer) { + stats.maybeUpdate() + + if stats.histogram == nil { + fmt.Fprint(w, "Histogram (empty)\n") + } else { + fmt.Fprintf(w, "Histogram (unit: %s)\n", fmt.Sprintf("%v", stats.unit)[1:]) + stats.histogram.Print(w) + } +} + +// String returns the textual output of the Stats as string. 
+func (stats *Stats) String() string { + var b bytes.Buffer + stats.Print(&b) + return b.String() +} diff --git a/vendor/google.golang.org/grpc/benchmark/stats/util.go b/vendor/google.golang.org/grpc/benchmark/stats/util.go new file mode 100644 index 00000000000..a9922f985f3 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/stats/util.go @@ -0,0 +1,191 @@ +package stats + +import ( + "bufio" + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strings" + "sync" + "testing" +) + +var ( + curB *testing.B + curBenchName string + curStats map[string]*Stats + + orgStdout *os.File + nextOutPos int + + injectCond *sync.Cond + injectDone chan struct{} +) + +// AddStats adds a new unnamed Stats instance to the current benchmark. You need +// to run benchmarks by calling RunTestMain() to inject the stats to the +// benchmark results. If numBuckets is not positive, the default value (16) will +// be used. Please note that this calls b.ResetTimer() since it may be blocked +// until the previous benchmark stats is printed out. So AddStats() should +// typically be called at the very beginning of each benchmark function. +func AddStats(b *testing.B, numBuckets int) *Stats { + return AddStatsWithName(b, "", numBuckets) +} + +// AddStatsWithName adds a new named Stats instance to the current benchmark. +// With this, you can add multiple stats in a single benchmark. You need +// to run benchmarks by calling RunTestMain() to inject the stats to the +// benchmark results. If numBuckets is not positive, the default value (16) will +// be used. Please note that this calls b.ResetTimer() since it may be blocked +// until the previous benchmark stats is printed out. So AddStatsWithName() +// should typically be called at the very beginning of each benchmark function. +func AddStatsWithName(b *testing.B, name string, numBuckets int) *Stats { + var benchName string + for i := 1; ; i++ { + pc, _, _, ok := runtime.Caller(i) + if !ok { + panic("benchmark function not found") + } + p := strings.Split(runtime.FuncForPC(pc).Name(), ".") + benchName = p[len(p)-1] + if strings.HasPrefix(benchName, "Benchmark") { + break + } + } + procs := runtime.GOMAXPROCS(-1) + if procs != 1 { + benchName = fmt.Sprintf("%s-%d", benchName, procs) + } + + stats := NewStats(numBuckets) + + if injectCond != nil { + // We need to wait until the previous benchmark stats is printed out. + injectCond.L.Lock() + for curB != nil && curBenchName != benchName { + injectCond.Wait() + } + + curB = b + curBenchName = benchName + curStats[name] = stats + + injectCond.L.Unlock() + } + + b.ResetTimer() + return stats +} + +// RunTestMain runs the tests with enabling injection of benchmark stats. It +// returns an exit code to pass to os.Exit. +func RunTestMain(m *testing.M) int { + startStatsInjector() + defer stopStatsInjector() + return m.Run() +} + +// startStatsInjector starts stats injection to benchmark results. +func startStatsInjector() { + orgStdout = os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + nextOutPos = 0 + + resetCurBenchStats() + + injectCond = sync.NewCond(&sync.Mutex{}) + injectDone = make(chan struct{}) + go func() { + defer close(injectDone) + + scanner := bufio.NewScanner(r) + scanner.Split(splitLines) + for scanner.Scan() { + injectStatsIfFinished(scanner.Text()) + } + if err := scanner.Err(); err != nil { + panic(err) + } + }() +} + +// stopStatsInjector stops stats injection and restores os.Stdout. 
+func stopStatsInjector() { + os.Stdout.Close() + <-injectDone + injectCond = nil + os.Stdout = orgStdout +} + +// splitLines is a split function for a bufio.Scanner that returns each line +// of text, teeing texts to the original stdout even before each line ends. +func splitLines(data []byte, eof bool) (advance int, token []byte, err error) { + if eof && len(data) == 0 { + return 0, nil, nil + } + + if i := bytes.IndexByte(data, '\n'); i >= 0 { + orgStdout.Write(data[nextOutPos : i+1]) + nextOutPos = 0 + return i + 1, data[0:i], nil + } + + orgStdout.Write(data[nextOutPos:]) + nextOutPos = len(data) + + if eof { + // This is a final, non-terminated line. Return it. + return len(data), data, nil + } + + return 0, nil, nil +} + +// injectStatsIfFinished prints out the stats if the current benchmark finishes. +func injectStatsIfFinished(line string) { + injectCond.L.Lock() + defer injectCond.L.Unlock() + + // We assume that the benchmark results start with the benchmark name. + if curB == nil || !strings.HasPrefix(line, curBenchName) { + return + } + + if !curB.Failed() { + // Output all stats in alphabetical order. + names := make([]string, 0, len(curStats)) + for name := range curStats { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + stats := curStats[name] + // The output of stats starts with a header like "Histogram (unit: ms)" + // followed by statistical properties and the buckets. Add the stats name + // if it is a named stats and indent them as Go testing outputs. + lines := strings.Split(stats.String(), "\n") + if n := len(lines); n > 0 { + if name != "" { + name = ": " + name + } + fmt.Fprintf(orgStdout, "--- %s%s\n", lines[0], name) + for _, line := range lines[1 : n-1] { + fmt.Fprintf(orgStdout, "\t%s\n", line) + } + } + } + } + + resetCurBenchStats() + injectCond.Signal() +} + +// resetCurBenchStats resets the current benchmark stats. +func resetCurBenchStats() { + curB = nil + curBenchName = "" + curStats = make(map[string]*Stats) +} diff --git a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go new file mode 100644 index 00000000000..77e522f26bb --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go @@ -0,0 +1,399 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "math" + "runtime" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +var ( + caFile = "benchmark/server/testdata/ca.pem" +) + +type lockingHistogram struct { + mu sync.Mutex + histogram *stats.Histogram +} + +func (h *lockingHistogram) add(value int64) { + h.mu.Lock() + defer h.mu.Unlock() + h.histogram.Add(value) +} + +// swap sets h.histogram to new, and returns its old value. +func (h *lockingHistogram) swap(new *stats.Histogram) *stats.Histogram { + h.mu.Lock() + defer h.mu.Unlock() + old := h.histogram + h.histogram = new + return old +} + +func (h *lockingHistogram) mergeInto(merged *stats.Histogram) { + h.mu.Lock() + defer h.mu.Unlock() + merged.Merge(h.histogram) +} + +type benchmarkClient struct { + closeConns func() + stop chan bool + lastResetTime time.Time + histogramOptions stats.HistogramOptions + lockingHistograms []lockingHistogram +} + +func printClientConfig(config *testpb.ClientConfig) { + // Some config options are ignored: + // - client type: + // will always create sync client + // - async client threads. + // - core list + grpclog.Printf(" * client type: %v (ignored, always creates sync client)", config.ClientType) + grpclog.Printf(" * async client threads: %v (ignored)", config.AsyncClientThreads) + // TODO: use cores specified by CoreList when setting list of cores is supported in go. + grpclog.Printf(" * core list: %v (ignored)", config.CoreList) + + grpclog.Printf(" - security params: %v", config.SecurityParams) + grpclog.Printf(" - core limit: %v", config.CoreLimit) + grpclog.Printf(" - payload config: %v", config.PayloadConfig) + grpclog.Printf(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel) + grpclog.Printf(" - channel number: %v", config.ClientChannels) + grpclog.Printf(" - load params: %v", config.LoadParams) + grpclog.Printf(" - rpc type: %v", config.RpcType) + grpclog.Printf(" - histogram params: %v", config.HistogramParams) + grpclog.Printf(" - server targets: %v", config.ServerTargets) +} + +func setupClientEnv(config *testpb.ClientConfig) { + // Use all cpu cores available on machine by default. + // TODO: Revisit this for the optimal default setup. + if config.CoreLimit > 0 { + runtime.GOMAXPROCS(int(config.CoreLimit)) + } else { + runtime.GOMAXPROCS(runtime.NumCPU()) + } +} + +// createConns creates connections according to given config. +// It returns the connections and corresponding function to close them. +// It returns non-nil error if there is anything wrong. +func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error) { + var opts []grpc.DialOption + + // Sanity check for client type. 
+ switch config.ClientType { + case testpb.ClientType_SYNC_CLIENT: + case testpb.ClientType_ASYNC_CLIENT: + default: + return nil, nil, grpc.Errorf(codes.InvalidArgument, "unknown client type: %v", config.ClientType) + } + + // Check and set security options. + if config.SecurityParams != nil { + creds, err := credentials.NewClientTLSFromFile(abs(caFile), config.SecurityParams.ServerHostOverride) + if err != nil { + return nil, nil, grpc.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + + // Use byteBufCodec if it is required. + if config.PayloadConfig != nil { + switch config.PayloadConfig.Payload.(type) { + case *testpb.PayloadConfig_BytebufParams: + opts = append(opts, grpc.WithCodec(byteBufCodec{})) + case *testpb.PayloadConfig_SimpleParams: + default: + return nil, nil, grpc.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig) + } + } + + // Create connections. + connCount := int(config.ClientChannels) + conns := make([]*grpc.ClientConn, connCount, connCount) + for connIndex := 0; connIndex < connCount; connIndex++ { + conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...) + } + + return conns, func() { + for _, conn := range conns { + conn.Close() + } + }, nil +} + +func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benchmarkClient) error { + // Read payload size and type from config. + var ( + payloadReqSize, payloadRespSize int + payloadType string + ) + if config.PayloadConfig != nil { + switch c := config.PayloadConfig.Payload.(type) { + case *testpb.PayloadConfig_BytebufParams: + payloadReqSize = int(c.BytebufParams.ReqSize) + payloadRespSize = int(c.BytebufParams.RespSize) + payloadType = "bytebuf" + case *testpb.PayloadConfig_SimpleParams: + payloadReqSize = int(c.SimpleParams.ReqSize) + payloadRespSize = int(c.SimpleParams.RespSize) + payloadType = "protobuf" + default: + return grpc.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig) + } + } + + // TODO add open loop distribution. + switch config.LoadParams.Load.(type) { + case *testpb.LoadParams_ClosedLoop: + case *testpb.LoadParams_Poisson: + return grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) + default: + return grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams) + } + + rpcCountPerConn := int(config.OutstandingRpcsPerChannel) + + switch config.RpcType { + case testpb.RpcType_UNARY: + bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize) + // TODO open loop. + case testpb.RpcType_STREAMING: + bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType) + // TODO open loop. + default: + return grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType) + } + + return nil +} + +func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) { + printClientConfig(config) + + // Set running environment like how many cores to use. 
+ setupClientEnv(config) + + conns, closeConns, err := createConns(config) + if err != nil { + return nil, err + } + + rpcCountPerConn := int(config.OutstandingRpcsPerChannel) + bc := &benchmarkClient{ + histogramOptions: stats.HistogramOptions{ + NumBuckets: int(math.Log(config.HistogramParams.MaxPossible)/math.Log(1+config.HistogramParams.Resolution)) + 1, + GrowthFactor: config.HistogramParams.Resolution, + BaseBucketSize: (1 + config.HistogramParams.Resolution), + MinValue: 0, + }, + lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns), rpcCountPerConn*len(conns)), + + stop: make(chan bool), + lastResetTime: time.Now(), + closeConns: closeConns, + } + + if err = performRPCs(config, conns, bc); err != nil { + // Close all connections if performRPCs failed. + closeConns() + return nil, err + } + + return bc, nil +} + +func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) { + for ic, conn := range conns { + client := testpb.NewBenchmarkServiceClient(conn) + // For each connection, create rpcCountPerConn goroutines to do rpc. + for j := 0; j < rpcCountPerConn; j++ { + // Create histogram for each goroutine. + idx := ic*rpcCountPerConn + j + bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions) + // Start goroutine on the created mutex and histogram. + go func(idx int) { + // TODO: do warm up if necessary. + // Now relying on worker client to reserve time to do warm up. + // The worker client needs to wait for some time after client is created, + // before starting benchmark. + done := make(chan bool) + for { + go func() { + start := time.Now() + if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { + select { + case <-bc.stop: + case done <- false: + } + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) + select { + case <-bc.stop: + case done <- true: + } + }() + select { + case <-bc.stop: + return + case <-done: + } + } + }(idx) + } + } +} + +func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) { + var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error + if payloadType == "bytebuf" { + doRPC = benchmark.DoByteBufStreamingRoundTrip + } else { + doRPC = benchmark.DoStreamingRoundTrip + } + for ic, conn := range conns { + // For each connection, create rpcCountPerConn goroutines to do rpc. + for j := 0; j < rpcCountPerConn; j++ { + c := testpb.NewBenchmarkServiceClient(conn) + stream, err := c.StreamingCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err) + } + // Create histogram for each goroutine. + idx := ic*rpcCountPerConn + j + bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions) + // Start goroutine on the created mutex and histogram. + go func(idx int) { + // TODO: do warm up if necessary. + // Now relying on worker client to reserve time to do warm up. + // The worker client needs to wait for some time after client is created, + // before starting benchmark. 
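+				// As in doCloseLoopUnary, this is a closed-loop driver: each iteration issues
+				// one RPC in a nested goroutine, records its latency in this goroutine's
+				// histogram, and then blocks until the RPC reports on done or the benchmark
+				// is stopped via bc.stop, so at most one RPC per goroutine is in flight.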
+ done := make(chan bool) + for { + go func() { + start := time.Now() + if err := doRPC(stream, reqSize, respSize); err != nil { + select { + case <-bc.stop: + case done <- false: + } + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) + select { + case <-bc.stop: + case done <- true: + } + }() + select { + case <-bc.stop: + return + case <-done: + } + } + }(idx) + } + } +} + +// getStats returns the stats for benchmark client. +// It resets lastResetTime and all histograms if argument reset is true. +func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats { + var timeElapsed float64 + mergedHistogram := stats.NewHistogram(bc.histogramOptions) + + if reset { + // Merging histogram may take some time. + // Put all histograms aside and merge later. + toMerge := make([]*stats.Histogram, len(bc.lockingHistograms), len(bc.lockingHistograms)) + for i := range bc.lockingHistograms { + toMerge[i] = bc.lockingHistograms[i].swap(stats.NewHistogram(bc.histogramOptions)) + } + + for i := 0; i < len(toMerge); i++ { + mergedHistogram.Merge(toMerge[i]) + } + + timeElapsed = time.Since(bc.lastResetTime).Seconds() + bc.lastResetTime = time.Now() + } else { + // Merge only, not reset. + for i := range bc.lockingHistograms { + bc.lockingHistograms[i].mergeInto(mergedHistogram) + } + timeElapsed = time.Since(bc.lastResetTime).Seconds() + } + + b := make([]uint32, len(mergedHistogram.Buckets), len(mergedHistogram.Buckets)) + for i, v := range mergedHistogram.Buckets { + b[i] = uint32(v.Count) + } + return &testpb.ClientStats{ + Latencies: &testpb.HistogramData{ + Bucket: b, + MinSeen: float64(mergedHistogram.Min), + MaxSeen: float64(mergedHistogram.Max), + Sum: float64(mergedHistogram.Sum), + SumOfSquares: float64(mergedHistogram.SumOfSquares), + Count: float64(mergedHistogram.Count), + }, + TimeElapsed: timeElapsed, + TimeUser: 0, + TimeSystem: 0, + } +} + +func (bc *benchmarkClient) shutdown() { + close(bc.stop) + bc.closeConns() +} diff --git a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go new file mode 100644 index 00000000000..667ef2c17d1 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go @@ -0,0 +1,173 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "runtime" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +var ( + // File path related to google.golang.org/grpc. + certFile = "benchmark/server/testdata/server1.pem" + keyFile = "benchmark/server/testdata/server1.key" +) + +type benchmarkServer struct { + port int + cores int + closeFunc func() + mu sync.RWMutex + lastResetTime time.Time +} + +func printServerConfig(config *testpb.ServerConfig) { + // Some config options are ignored: + // - server type: + // will always start sync server + // - async server threads + // - core list + grpclog.Printf(" * server type: %v (ignored, always starts sync server)", config.ServerType) + grpclog.Printf(" * async server threads: %v (ignored)", config.AsyncServerThreads) + // TODO: use cores specified by CoreList when setting list of cores is supported in go. + grpclog.Printf(" * core list: %v (ignored)", config.CoreList) + + grpclog.Printf(" - security params: %v", config.SecurityParams) + grpclog.Printf(" - core limit: %v", config.CoreLimit) + grpclog.Printf(" - port: %v", config.Port) + grpclog.Printf(" - payload config: %v", config.PayloadConfig) +} + +func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) { + printServerConfig(config) + + // Use all cpu cores available on machine by default. + // TODO: Revisit this for the optimal default setup. + numOfCores := runtime.NumCPU() + if config.CoreLimit > 0 { + numOfCores = int(config.CoreLimit) + } + runtime.GOMAXPROCS(numOfCores) + + var opts []grpc.ServerOption + + // Sanity check for server type. + switch config.ServerType { + case testpb.ServerType_SYNC_SERVER: + case testpb.ServerType_ASYNC_SERVER: + case testpb.ServerType_ASYNC_GENERIC_SERVER: + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknow server type: %v", config.ServerType) + } + + // Set security options. + if config.SecurityParams != nil { + creds, err := credentials.NewServerTLSFromFile(abs(certFile), abs(keyFile)) + if err != nil { + grpclog.Fatalf("failed to generate credentials %v", err) + } + opts = append(opts, grpc.Creds(creds)) + } + + // Priority: config.Port > serverPort > default (0). + port := int(config.Port) + if port == 0 { + port = serverPort + } + + // Create different benchmark server according to config. + var ( + addr string + closeFunc func() + err error + ) + if config.PayloadConfig != nil { + switch payload := config.PayloadConfig.Payload.(type) { + case *testpb.PayloadConfig_BytebufParams: + opts = append(opts, grpc.CustomCodec(byteBufCodec{})) + addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{ + Addr: ":" + strconv.Itoa(port), + Type: "bytebuf", + Metadata: payload.BytebufParams.RespSize, + }, opts...) 
+ case *testpb.PayloadConfig_SimpleParams: + addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{ + Addr: ":" + strconv.Itoa(port), + Type: "protobuf", + }, opts...) + case *testpb.PayloadConfig_ComplexParams: + return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig) + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig) + } + } else { + // Start protobuf server if payload config is nil. + addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{ + Addr: ":" + strconv.Itoa(port), + Type: "protobuf", + }, opts...) + } + + grpclog.Printf("benchmark server listening at %v", addr) + addrSplitted := strings.Split(addr, ":") + p, err := strconv.Atoi(addrSplitted[len(addrSplitted)-1]) + if err != nil { + grpclog.Fatalf("failed to get port number from server address: %v", err) + } + + return &benchmarkServer{port: p, cores: numOfCores, closeFunc: closeFunc, lastResetTime: time.Now()}, nil +} + +// getStats returns the stats for benchmark server. +// It resets lastResetTime if argument reset is true. +func (bs *benchmarkServer) getStats(reset bool) *testpb.ServerStats { + // TODO wall time, sys time, user time. + bs.mu.RLock() + defer bs.mu.RUnlock() + timeElapsed := time.Since(bs.lastResetTime).Seconds() + if reset { + bs.lastResetTime = time.Now() + } + return &testpb.ServerStats{TimeElapsed: timeElapsed, TimeUser: 0, TimeSystem: 0} +} diff --git a/vendor/google.golang.org/grpc/benchmark/worker/main.go b/vendor/google.golang.org/grpc/benchmark/worker/main.go new file mode 100644 index 00000000000..17c52519bbe --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/worker/main.go @@ -0,0 +1,231 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package main + +import ( + "flag" + "fmt" + "io" + "net" + "runtime" + "strconv" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" +) + +var ( + driverPort = flag.Int("driver_port", 10000, "port for communication with driver") + serverPort = flag.Int("server_port", 0, "port for benchmark server if not specified by server config message") +) + +type byteBufCodec struct { +} + +func (byteBufCodec) Marshal(v interface{}) ([]byte, error) { + b, ok := v.(*[]byte) + if !ok { + return nil, fmt.Errorf("failed to marshal: %v is not type of *[]byte", v) + } + return *b, nil +} + +func (byteBufCodec) Unmarshal(data []byte, v interface{}) error { + b, ok := v.(*[]byte) + if !ok { + return fmt.Errorf("failed to marshal: %v is not type of *[]byte", v) + } + *b = data + return nil +} + +func (byteBufCodec) String() string { + return "bytebuffer" +} + +// workerServer implements WorkerService rpc handlers. +// It can create benchmarkServer or benchmarkClient on demand. +type workerServer struct { + stop chan<- bool + serverPort int +} + +func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error { + var bs *benchmarkServer + defer func() { + // Close benchmark server when stream ends. + grpclog.Printf("closing benchmark server") + if bs != nil { + bs.closeFunc() + } + }() + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + var out *testpb.ServerStatus + switch argtype := in.Argtype.(type) { + case *testpb.ServerArgs_Setup: + grpclog.Printf("server setup received:") + if bs != nil { + grpclog.Printf("server setup received when server already exists, closing the existing server") + bs.closeFunc() + } + bs, err = startBenchmarkServer(argtype.Setup, s.serverPort) + if err != nil { + return err + } + out = &testpb.ServerStatus{ + Stats: bs.getStats(false), + Port: int32(bs.port), + Cores: int32(bs.cores), + } + + case *testpb.ServerArgs_Mark: + grpclog.Printf("server mark received:") + grpclog.Printf(" - %v", argtype) + if bs == nil { + return grpc.Errorf(codes.InvalidArgument, "server does not exist when mark received") + } + out = &testpb.ServerStatus{ + Stats: bs.getStats(argtype.Mark.Reset_), + Port: int32(bs.port), + Cores: int32(bs.cores), + } + } + + if err := stream.Send(out); err != nil { + return err + } + } +} + +func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error { + var bc *benchmarkClient + defer func() { + // Shut down benchmark client when stream ends. 
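+		// The deferred shutdown runs whether stream.Recv ends with io.EOF or any other
+		// error, so the benchmark client's connections are always released.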
+ grpclog.Printf("shuting down benchmark client") + if bc != nil { + bc.shutdown() + } + }() + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + var out *testpb.ClientStatus + switch t := in.Argtype.(type) { + case *testpb.ClientArgs_Setup: + grpclog.Printf("client setup received:") + if bc != nil { + grpclog.Printf("client setup received when client already exists, shuting down the existing client") + bc.shutdown() + } + bc, err = startBenchmarkClient(t.Setup) + if err != nil { + return err + } + out = &testpb.ClientStatus{ + Stats: bc.getStats(false), + } + + case *testpb.ClientArgs_Mark: + grpclog.Printf("client mark received:") + grpclog.Printf(" - %v", t) + if bc == nil { + return grpc.Errorf(codes.InvalidArgument, "client does not exist when mark received") + } + out = &testpb.ClientStatus{ + Stats: bc.getStats(t.Mark.Reset_), + } + } + + if err := stream.Send(out); err != nil { + return err + } + } +} + +func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) { + grpclog.Printf("core count: %v", runtime.NumCPU()) + return &testpb.CoreResponse{Cores: int32(runtime.NumCPU())}, nil +} + +func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) { + grpclog.Printf("quiting worker") + s.stop <- true + return &testpb.Void{}, nil +} + +func main() { + grpc.EnableTracing = false + + flag.Parse() + lis, err := net.Listen("tcp", ":"+strconv.Itoa(*driverPort)) + if err != nil { + grpclog.Fatalf("failed to listen: %v", err) + } + grpclog.Printf("worker listening at port %v", *driverPort) + + s := grpc.NewServer() + stop := make(chan bool) + testpb.RegisterWorkerServiceServer(s, &workerServer{ + stop: stop, + serverPort: *serverPort, + }) + + go func() { + <-stop + // Wait for 1 second before stopping the server to make sure the return value of QuitWorker is sent to client. + // TODO revise this once server graceful stop is supported in gRPC. + time.Sleep(time.Second) + s.Stop() + }() + + s.Serve(lis) +} diff --git a/vendor/google.golang.org/grpc/benchmark/worker/util.go b/vendor/google.golang.org/grpc/benchmark/worker/util.go new file mode 100644 index 00000000000..f0016ce49f9 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/worker/util.go @@ -0,0 +1,75 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "log" + "os" + "path/filepath" +) + +// abs returns the absolute path the given relative file or directory path, +// relative to the google.golang.org/grpc directory in the user's GOPATH. +// If rel is already absolute, it is returned unmodified. +func abs(rel string) string { + if filepath.IsAbs(rel) { + return rel + } + v, err := goPackagePath("google.golang.org/grpc") + if err != nil { + log.Fatalf("Error finding google.golang.org/grpc/testdata directory: %v", err) + } + return filepath.Join(v, rel) +} + +func goPackagePath(pkg string) (path string, err error) { + gp := os.Getenv("GOPATH") + if gp == "" { + return path, os.ErrNotExist + } + for _, p := range filepath.SplitList(gp) { + dir := filepath.Join(p, "src", filepath.FromSlash(pkg)) + fi, err := os.Stat(dir) + if os.IsNotExist(err) { + continue + } + if err != nil { + return "", err + } + if !fi.IsDir() { + continue + } + return dir, nil + } + return path, os.ErrNotExist +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 9d815af39d8..d6326ea0471 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -34,13 +34,13 @@ package grpc import ( + "bytes" "io" "time" "golang.org/x/net/context" "golang.org/x/net/trace" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) @@ -48,16 +48,16 @@ import ( // On error, it returns the error and indicates whether the call should be retried. // // TODO(zhaoq): Check whether the received message sequence is valid. -func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { +func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { // Try to acquire header metadata from the server if there is any. var err error c.headerMD, err = stream.Header() if err != nil { return err } - p := &parser{s: stream} + p := &parser{r: stream} for { - if err = recv(p, codec, reply); err != nil { + if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil { if err == io.EOF { break } @@ -69,7 +69,7 @@ func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream } // sendRequest writes out various information of an RPC such as Context and Message. 
-func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { +func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { stream, err := t.NewStream(ctx, callHdr) if err != nil { return nil, err @@ -81,31 +81,30 @@ func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t } } }() - // TODO(zhaoq): Support compression. - outBuf, err := encode(codec, args, compressionNone) + var cbuf *bytes.Buffer + if compressor != nil { + cbuf = new(bytes.Buffer) + } + outBuf, err := encode(codec, args, compressor, cbuf) if err != nil { return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err) } err = t.Write(stream, outBuf, opts) - if err != nil { + // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method + // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following + // recvResponse to get the final status. + if err != nil && err != io.EOF { return nil, err } // Sent successfully. return stream, nil } -// callInfo contains all related configuration and information about an RPC. -type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - traceInfo traceInfo // in trace.go -} - -// Invoke is called by the generated code. It sends the RPC request on the -// wire and returns after response is received. +// Invoke sends the RPC request on the wire and returns after response is received. +// Invoke is called by generated code. Also users can call Invoke directly when it +// is really needed in their use cases. func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { - var c callInfo + c := defaultCallInfo for _, o := range opts { if err := o.before(&c); err != nil { return toRPCErr(err) @@ -136,57 +135,82 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli Last: true, Delay: false, } - var ( - lastErr error // record the error that happened - ) for { var ( err error t transport.ClientTransport stream *transport.Stream + // Record the put handler from Balancer.Get(...). It is called once the + // RPC has completed or failed. + put func() ) - // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs. - if lastErr != nil && c.failFast { - return toRPCErr(lastErr) - } + // TODO(zhaoq): Need a formal spec of fail-fast. callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, } - t, err = cc.dopts.picker.Pick(ctx) + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + gopts := BalancerGetOptions{ + BlockingWait: !c.failFast, + } + t, put, err = cc.getTransport(ctx, gopts) if err != nil { - if lastErr != nil { - // This was a retry; return the error from the last attempt. - return toRPCErr(lastErr) + // TODO(zhaoq): Probably revisit the error handling. + if _, ok := err.(*rpcError); ok { + return err } - return toRPCErr(err) + if err == errConnClosing { + if c.failFast { + return Errorf(codes.Unavailable, "%v", errConnClosing) + } + continue + } + // All the other errors are treated as Internal errors. 
+ return Errorf(codes.Internal, "%v", err) } if c.traceInfo.tr != nil { c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) } - stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts) + stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) if err != nil { + if put != nil { + put() + put = nil + } if _, ok := err.(transport.ConnectionError); ok { - lastErr = err + if c.failFast { + return toRPCErr(err) + } continue } - if lastErr != nil { - return toRPCErr(lastErr) - } return toRPCErr(err) } // Receive the response - lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply) - if _, ok := lastErr.(transport.ConnectionError); ok { - continue + err = recvResponse(cc.dopts, t, &c, stream, reply) + if err != nil { + if put != nil { + put() + put = nil + } + if _, ok := err.(transport.ConnectionError); ok { + if c.failFast { + return toRPCErr(err) + } + continue + } + t.CloseStream(stream, err) + return toRPCErr(err) } if c.traceInfo.tr != nil { c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) } - t.CloseStream(stream, lastErr) - if lastErr != nil { - return toRPCErr(lastErr) + t.CloseStream(stream, nil) + if put != nil { + put() + put = nil } - return Errorf(stream.StatusCode(), stream.StatusDesc()) + return Errorf(stream.StatusCode(), "%s", stream.StatusDesc()) } } diff --git a/vendor/google.golang.org/grpc/call_test.go b/vendor/google.golang.org/grpc/call_test.go new file mode 100644 index 00000000000..4934985867b --- /dev/null +++ b/vendor/google.golang.org/grpc/call_test.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package grpc + +import ( + "fmt" + "io" + "math" + "net" + "strconv" + "strings" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/transport" +) + +var ( + expectedRequest = "ping" + expectedResponse = "pong" + weirdError = "format verbs: %v%s" + sizeLargeErr = 1024 * 1024 + canceled = 0 +) + +type testCodec struct { +} + +func (testCodec) Marshal(v interface{}) ([]byte, error) { + return []byte(*(v.(*string))), nil +} + +func (testCodec) Unmarshal(data []byte, v interface{}) error { + *(v.(*string)) = string(data) + return nil +} + +func (testCodec) String() string { + return "test" +} + +type testStreamHandler struct { + port string + t transport.ServerTransport +} + +func (h *testStreamHandler) handleStream(t *testing.T, s *transport.Stream) { + p := &parser{r: s} + for { + pf, req, err := p.recvMsg() + if err == io.EOF { + break + } + if err != nil { + return + } + if pf != compressionNone { + t.Errorf("Received the mistaken message format %d, want %d", pf, compressionNone) + return + } + var v string + codec := testCodec{} + if err := codec.Unmarshal(req, &v); err != nil { + t.Errorf("Failed to unmarshal the received message: %v", err) + return + } + if v == "weird error" { + h.t.WriteStatus(s, codes.Internal, weirdError) + return + } + if v == "canceled" { + canceled++ + h.t.WriteStatus(s, codes.Internal, "") + return + } + if v == "port" { + h.t.WriteStatus(s, codes.Internal, h.port) + return + } + + if v != expectedRequest { + h.t.WriteStatus(s, codes.Internal, strings.Repeat("A", sizeLargeErr)) + return + } + } + // send a response back to end the stream. + reply, err := encode(testCodec{}, &expectedResponse, nil, nil) + if err != nil { + t.Errorf("Failed to encode the response: %v", err) + return + } + h.t.Write(s, reply, &transport.Options{}) + h.t.WriteStatus(s, codes.OK, "") +} + +type server struct { + lis net.Listener + port string + startedErr chan error // sent nil or an error after server starts + mu sync.Mutex + conns map[transport.ServerTransport]bool +} + +func newTestServer() *server { + return &server{startedErr: make(chan error, 1)} +} + +// start starts server. Other goroutines should block on s.startedErr for further operations. 
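+// A port of 0 lets the OS choose a free port; the address actually bound is parsed
+// back out and published via s.port before startedErr is signalled.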
+func (s *server) start(t *testing.T, port int, maxStreams uint32) { + var err error + if port == 0 { + s.lis, err = net.Listen("tcp", "localhost:0") + } else { + s.lis, err = net.Listen("tcp", "localhost:"+strconv.Itoa(port)) + } + if err != nil { + s.startedErr <- fmt.Errorf("failed to listen: %v", err) + return + } + _, p, err := net.SplitHostPort(s.lis.Addr().String()) + if err != nil { + s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err) + return + } + s.port = p + s.conns = make(map[transport.ServerTransport]bool) + s.startedErr <- nil + for { + conn, err := s.lis.Accept() + if err != nil { + return + } + st, err := transport.NewServerTransport("http2", conn, maxStreams, nil) + if err != nil { + continue + } + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + st.Close() + return + } + s.conns[st] = true + s.mu.Unlock() + h := &testStreamHandler{ + port: s.port, + t: st, + } + go st.HandleStreams(func(s *transport.Stream) { + go h.handleStream(t, s) + }) + } +} + +func (s *server) wait(t *testing.T, timeout time.Duration) { + select { + case err := <-s.startedErr: + if err != nil { + t.Fatal(err) + } + case <-time.After(timeout): + t.Fatalf("Timed out after %v waiting for server to be ready", timeout) + } +} + +func (s *server) stop() { + s.lis.Close() + s.mu.Lock() + for c := range s.conns { + c.Close() + } + s.conns = nil + s.mu.Unlock() +} + +func setUp(t *testing.T, port int, maxStreams uint32) (*server, *ClientConn) { + server := newTestServer() + go server.start(t, port, maxStreams) + server.wait(t, 2*time.Second) + addr := "localhost:" + server.port + cc, err := Dial(addr, WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + return server, cc +} + +func TestInvoke(t *testing.T) { + server, cc := setUp(t, 0, math.MaxUint32) + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) + } + cc.Close() + server.stop() +} + +func TestInvokeLargeErr(t *testing.T) { + server, cc := setUp(t, 0, math.MaxUint32) + var reply string + req := "hello" + err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc) + if _, ok := err.(*rpcError); !ok { + t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.") + } + if Code(err) != codes.Internal || len(ErrorDesc(err)) != sizeLargeErr { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want an error of code %d and desc size %d", err, codes.Internal, sizeLargeErr) + } + cc.Close() + server.stop() +} + +// TestInvokeErrorSpecialChars checks that error messages don't get mangled. +func TestInvokeErrorSpecialChars(t *testing.T) { + server, cc := setUp(t, 0, math.MaxUint32) + var reply string + req := "weird error" + err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc) + if _, ok := err.(*rpcError); !ok { + t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.") + } + if got, want := ErrorDesc(err), weirdError; got != want { + t.Fatalf("grpc.Invoke(_, _, _, _, _) error = %q, want %q", got, want) + } + cc.Close() + server.stop() +} + +// TestInvokeCancel checks that an Invoke with a canceled context is not sent. 
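+// The test stream handler increments the package-level canceled counter for every
+// "canceled" request that reaches the server, so the check below passes only if none
+// of the 100 canceled Invokes were actually sent.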
+func TestInvokeCancel(t *testing.T) { + server, cc := setUp(t, 0, math.MaxUint32) + var reply string + req := "canceled" + for i := 0; i < 100; i++ { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + Invoke(ctx, "/foo/bar", &req, &reply, cc) + } + if canceled != 0 { + t.Fatalf("received %d of 100 canceled requests", canceled) + } + cc.Close() + server.stop() +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 4729bbd6abe..c3c7691dc0b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -43,28 +43,38 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/transport" ) var ( - // ErrUnspecTarget indicates that the target address is unspecified. - ErrUnspecTarget = errors.New("grpc: target is unspecified") - // ErrNoTransportSecurity indicates that there is no transport security - // being set for ClientConn. Users should either set one or explicityly - // call WithInsecure DialOption to disable security. - ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") - // ErrCredentialsMisuse indicates that users want to transmit security infomation - // (e.g., oauth2 token) which requires secure connection on an insecure - // connection. - ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)") // ErrClientConnClosing indicates that the operation is illegal because - // the session is closing. + // the ClientConn is closing. ErrClientConnClosing = errors.New("grpc: the client connection is closing") - // ErrClientConnTimeout indicates that the connection could not be - // established or re-established within the specified timeout. - ErrClientConnTimeout = errors.New("grpc: timed out trying to connect") + // ErrClientConnTimeout indicates that the ClientConn cannot establish the + // underlying connections within the specified timeout. + ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., oauth2 token) which requires secure connection on an insecure + // connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") + // errNetworkIP indicates that the connection is down due to some network I/O error. + errNetworkIO = errors.New("grpc: failed with network I/O error") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. 
+ errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + errNoAddr = errors.New("grpc: there is no address available to dial") // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second ) @@ -73,9 +83,13 @@ var ( // values passed to Dial. type dialOptions struct { codec Codec - picker Picker + cp Compressor + dc Decompressor + bs backoffStrategy + balancer Balancer block bool insecure bool + timeout time.Duration copts transport.ConnectOptions } @@ -89,6 +103,57 @@ func WithCodec(c Codec) DialOption { } } +// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message +// compressor. +func WithCompressor(cp Compressor) DialOption { + return func(o *dialOptions) { + o.cp = cp + } +} + +// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating +// message decompressor. +func WithDecompressor(dc Decompressor) DialOption { + return func(o *dialOptions) { + o.dc = dc + } +} + +// WithBalancer returns a DialOption which sets a load balancer. +func WithBalancer(b Balancer) DialOption { + return func(o *dialOptions) { + o.balancer = b + } +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up +// for use. +func WithBackoffConfig(b BackoffConfig) DialOption { + // Set defaults to ensure that provided BackoffConfig is valid and + // unexported fields get default values. + setDefaults(&b) + return withBackoff(b) +} + +// withBackoff sets the backoff strategy used for retries after a +// failed connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs backoffStrategy) DialOption { + return func(o *dialOptions) { + o.bs = bs + } +} + // WithBlock returns a DialOption which makes caller of Dial blocks until the underlying // connection is up. Without this, Dial returns immediately and connecting the server // happens in background. @@ -98,6 +163,8 @@ func WithBlock() DialOption { } } +// WithInsecure returns a DialOption which disables transport security for this ClientConn. +// Note that transport security is required unless WithInsecure is set. func WithInsecure() DialOption { return func(o *dialOptions) { o.insecure = true @@ -106,24 +173,25 @@ func WithInsecure() DialOption { // WithTransportCredentials returns a DialOption which configures a // connection level security credentials (e.g., TLS/SSL). -func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption { +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { return func(o *dialOptions) { - o.copts.AuthOptions = append(o.copts.AuthOptions, creds) + o.copts.TransportCredentials = creds } } // WithPerRPCCredentials returns a DialOption which sets // credentials which will place auth state on each outbound RPC. 
-func WithPerRPCCredentials(creds credentials.Credentials) DialOption { +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { return func(o *dialOptions) { - o.copts.AuthOptions = append(o.copts.AuthOptions, creds) + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) } } -// WithTimeout returns a DialOption that configures a timeout for dialing a client connection. +// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn +// initially. This is valid if and only if WithBlock() is present. func WithTimeout(d time.Duration) DialOption { return func(o *dialOptions) { - o.copts.Timeout = d + o.timeout = d } } @@ -145,20 +213,67 @@ func WithUserAgent(s string) DialOption { func Dial(target string, opts ...DialOption) (*ClientConn, error) { cc := &ClientConn{ target: target, + conns: make(map[Address]*addrConn), } for _, opt := range opts { opt(&cc.dopts) } + + // Set defaults. if cc.dopts.codec == nil { - // Set the default codec. cc.dopts.codec = protoCodec{} } - if cc.dopts.picker == nil { - cc.dopts.picker = &unicastPicker{} + if cc.dopts.bs == nil { + cc.dopts.bs = DefaultBackoffConfig } - if err := cc.dopts.picker.Init(cc); err != nil { + if cc.dopts.balancer == nil { + cc.dopts.balancer = RoundRobin(nil) + } + + if err := cc.dopts.balancer.Start(target); err != nil { return nil, err } + var ( + ok bool + addrs []Address + ) + ch := cc.dopts.balancer.Notify() + if ch == nil { + // There is no name resolver installed. + addrs = append(addrs, Address{Addr: target}) + } else { + addrs, ok = <-ch + if !ok || len(addrs) == 0 { + return nil, errNoAddr + } + } + waitC := make(chan error, 1) + go func() { + for _, a := range addrs { + if err := cc.newAddrConn(a, false); err != nil { + waitC <- err + return + } + } + close(waitC) + }() + var timeoutCh <-chan time.Time + if cc.dopts.timeout > 0 { + timeoutCh = time.After(cc.dopts.timeout) + } + select { + case err := <-waitC: + if err != nil { + cc.Close() + return nil, err + } + case <-timeoutCh: + cc.Close() + return nil, ErrClientConnTimeout + } + if ok { + go cc.lbWatcher() + } colonPos := strings.LastIndex(target, ":") if colonPos == -1 { colonPos = len(target) @@ -200,293 +315,363 @@ func (s ConnectivityState) String() string { } } -// ClientConn represents a client connection to an RPC service. +// ClientConn represents a client connection to an RPC server. type ClientConn struct { target string authority string dopts dialOptions -} - -// State returns the connectivity state of cc. -// This is EXPERIMENTAL API. -func (cc *ClientConn) State() ConnectivityState { - return cc.dopts.picker.State() -} - -// WaitForStateChange blocks until the state changes to something other than the sourceState -// or timeout fires on cc. It returns false if timeout fires, and true otherwise. -// This is EXPERIMENTAL API. -func (cc *ClientConn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool { - return cc.dopts.picker.WaitForStateChange(timeout, sourceState) -} -// Close starts to tear down the ClientConn. -func (cc *ClientConn) Close() error { - return cc.dopts.picker.Close() + mu sync.RWMutex + conns map[Address]*addrConn } -// Conn is a client connection to a single destination. -type Conn struct { - target string - dopts dialOptions - shutdownChan chan struct{} - events trace.EventLog - - mu sync.Mutex - state ConnectivityState - stateCV *sync.Cond - // ready is closed and becomes nil when a new transport is up or failed - // due to timeout. 
- ready chan struct{} - transport transport.ClientTransport +func (cc *ClientConn) lbWatcher() { + for addrs := range cc.dopts.balancer.Notify() { + var ( + add []Address // Addresses need to setup connections. + del []*addrConn // Connections need to tear down. + ) + cc.mu.Lock() + for _, a := range addrs { + if _, ok := cc.conns[a]; !ok { + add = append(add, a) + } + } + for k, c := range cc.conns { + var keep bool + for _, a := range addrs { + if k == a { + keep = true + break + } + } + if !keep { + del = append(del, c) + } + } + cc.mu.Unlock() + for _, a := range add { + cc.newAddrConn(a, true) + } + for _, c := range del { + c.tearDown(errConnDrain) + } + } } -// NewConn creates a Conn. -func NewConn(cc *ClientConn) (*Conn, error) { - if cc.target == "" { - return nil, ErrUnspecTarget - } - c := &Conn{ - target: cc.target, +func (cc *ClientConn) newAddrConn(addr Address, skipWait bool) error { + ac := &addrConn{ + cc: cc, + addr: addr, dopts: cc.dopts, shutdownChan: make(chan struct{}), } if EnableTracing { - c.events = trace.NewEventLog("grpc.ClientConn", c.target) + ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr) } - if !c.dopts.insecure { - var ok bool - for _, cd := range c.dopts.copts.AuthOptions { - if _, ok := cd.(credentials.TransportAuthenticator); !ok { - continue - } - ok = true - } - if !ok { - return nil, ErrNoTransportSecurity + if !ac.dopts.insecure { + if ac.dopts.copts.TransportCredentials == nil { + return errNoTransportSecurity } } else { - for _, cd := range c.dopts.copts.AuthOptions { + if ac.dopts.copts.TransportCredentials != nil { + return errCredentialsConflict + } + for _, cd := range ac.dopts.copts.PerRPCCredentials { if cd.RequireTransportSecurity() { - return nil, ErrCredentialsMisuse + return errTransportCredentialsMissing } } } - c.stateCV = sync.NewCond(&c.mu) - if c.dopts.block { - if err := c.resetTransport(false); err != nil { - c.Close() - return nil, err + // Insert ac into ac.cc.conns. This needs to be done before any getTransport(...) is called. + ac.cc.mu.Lock() + if ac.cc.conns == nil { + ac.cc.mu.Unlock() + return ErrClientConnClosing + } + stale := ac.cc.conns[ac.addr] + ac.cc.conns[ac.addr] = ac + ac.cc.mu.Unlock() + if stale != nil { + // There is an addrConn alive on ac.addr already. This could be due to + // i) stale's Close is undergoing; + // ii) a buggy Balancer notifies duplicated Addresses. + stale.tearDown(errConnDrain) + } + ac.stateCV = sync.NewCond(&ac.mu) + // skipWait may overwrite the decision in ac.dopts.block. + if ac.dopts.block && !skipWait { + if err := ac.resetTransport(false); err != nil { + ac.tearDown(err) + return err } // Start to monitor the error status of transport. - go c.transportMonitor() + go ac.transportMonitor() } else { // Start a goroutine connecting to the server asynchronously. go func() { - if err := c.resetTransport(false); err != nil { - grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err) - c.Close() + if err := ac.resetTransport(false); err != nil { + grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err) + ac.tearDown(err) return } - c.transportMonitor() + ac.transportMonitor() }() } - return c, nil + return nil } -// printf records an event in cc's event log, unless cc has been closed. -// REQUIRES cc.mu is held. -func (cc *Conn) printf(format string, a ...interface{}) { - if cc.events != nil { - cc.events.Printf(format, a...) 
+func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { + addr, put, err := cc.dopts.balancer.Get(ctx, opts) + if err != nil { + return nil, nil, toRPCErr(err) + } + cc.mu.RLock() + if cc.conns == nil { + cc.mu.RUnlock() + return nil, nil, toRPCErr(ErrClientConnClosing) + } + ac, ok := cc.conns[addr] + cc.mu.RUnlock() + if !ok { + if put != nil { + put() + } + return nil, nil, Errorf(codes.Internal, "grpc: failed to find the transport to send the rpc") + } + t, err := ac.wait(ctx, !opts.BlockingWait) + if err != nil { + if put != nil { + put() + } + return nil, nil, err } + return t, put, nil } -// errorf records an error in cc's event log, unless cc has been closed. -// REQUIRES cc.mu is held. -func (cc *Conn) errorf(format string, a ...interface{}) { - if cc.events != nil { - cc.events.Errorf(format, a...) +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.mu.Unlock() + cc.dopts.balancer.Close() + for _, ac := range conns { + ac.tearDown(ErrClientConnClosing) } + return nil } -// State returns the connectivity state of the Conn -func (cc *Conn) State() ConnectivityState { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.state +// addrConn is a network connection to a given address. +type addrConn struct { + cc *ClientConn + addr Address + dopts dialOptions + shutdownChan chan struct{} + events trace.EventLog + + mu sync.Mutex + state ConnectivityState + stateCV *sync.Cond + down func(error) // the handler called when a connection is down. + // ready is closed and becomes nil when a new transport is up or failed + // due to timeout. + ready chan struct{} + transport transport.ClientTransport } -// WaitForStateChange blocks until the state changes to something other than the sourceState -// or timeout fires. It returns false if timeout fires and true otherwise. -// TODO(zhaoq): Rewrite for complex Picker. -func (cc *Conn) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool { - start := time.Now() - cc.mu.Lock() - defer cc.mu.Unlock() - if sourceState != cc.state { - return true +// printf records an event in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) printf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Printf(format, a...) } - expired := timeout <= time.Since(start) - if expired { - return false +} + +// errorf records an error in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) errorf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Errorf(format, a...) + } +} + +// getState returns the connectivity state of the Conn +func (ac *addrConn) getState() ConnectivityState { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +// waitForStateChange blocks until the state changes to something other than the sourceState. 
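+// Context cancelation is propagated by broadcasting on stateCV, which wakes the
+// waiting loop so it can observe ctx.Err() and return early.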
+func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if sourceState != ac.state { + return ac.state, nil } done := make(chan struct{}) + var err error go func() { select { - case <-time.After(timeout - time.Since(start)): - cc.mu.Lock() - expired = true - cc.stateCV.Broadcast() - cc.mu.Unlock() + case <-ctx.Done(): + ac.mu.Lock() + err = ctx.Err() + ac.stateCV.Broadcast() + ac.mu.Unlock() case <-done: } }() defer close(done) - for sourceState == cc.state { - cc.stateCV.Wait() - if expired { - return false + for sourceState == ac.state { + ac.stateCV.Wait() + if err != nil { + return ac.state, err } } - return true + return ac.state, nil } -func (cc *Conn) resetTransport(closeTransport bool) error { +func (ac *addrConn) resetTransport(closeTransport bool) error { var retries int - start := time.Now() for { - cc.mu.Lock() - cc.printf("connecting") - if cc.state == Shutdown { - cc.mu.Unlock() - return ErrClientConnClosing + ac.mu.Lock() + ac.printf("connecting") + if ac.state == Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return errConnClosing } - cc.state = Connecting - cc.stateCV.Broadcast() - cc.mu.Unlock() - if closeTransport { - cc.transport.Close() - } - // Adjust timeout for the current try. - copts := cc.dopts.copts - if copts.Timeout < 0 { - cc.Close() - return ErrClientConnTimeout - } - if copts.Timeout > 0 { - copts.Timeout -= time.Since(start) - if copts.Timeout <= 0 { - cc.Close() - return ErrClientConnTimeout - } + if ac.down != nil { + ac.down(downErrorf(false, true, "%v", errNetworkIO)) + ac.down = nil } - sleepTime := backoff(retries) - timeout := sleepTime - if timeout < minConnectTimeout { - timeout = minConnectTimeout + ac.state = Connecting + ac.stateCV.Broadcast() + t := ac.transport + ac.mu.Unlock() + if closeTransport && t != nil { + t.Close() } - if copts.Timeout == 0 || copts.Timeout > timeout { - copts.Timeout = timeout + sleepTime := ac.dopts.bs.backoff(retries) + ac.dopts.copts.Timeout = sleepTime + if sleepTime < minConnectTimeout { + ac.dopts.copts.Timeout = minConnectTimeout } connectTime := time.Now() - newTransport, err := transport.NewClientTransport(cc.target, &copts) + newTransport, err := transport.NewClientTransport(ac.addr.Addr, &ac.dopts.copts) if err != nil { - cc.mu.Lock() - cc.errorf("transient failure: %v", err) - cc.state = TransientFailure - cc.stateCV.Broadcast() - if cc.ready != nil { - close(cc.ready) - cc.ready = nil + ac.mu.Lock() + if ac.state == Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return errConnClosing + } + ac.errorf("transient failure: %v", err) + ac.state = TransientFailure + ac.stateCV.Broadcast() + if ac.ready != nil { + close(ac.ready) + ac.ready = nil } - cc.mu.Unlock() + ac.mu.Unlock() sleepTime -= time.Since(connectTime) if sleepTime < 0 { sleepTime = 0 } - // Fail early before falling into sleep. 
- if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) { - cc.mu.Lock() - cc.errorf("connection timeout") - cc.mu.Unlock() - cc.Close() - return ErrClientConnTimeout - } closeTransport = false - time.Sleep(sleepTime) + select { + case <-time.After(sleepTime): + case <-ac.shutdownChan: + } retries++ - grpclog.Printf("grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) + grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, ac.addr) continue } - cc.mu.Lock() - cc.printf("ready") - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() + ac.mu.Lock() + ac.printf("ready") + if ac.state == Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() newTransport.Close() - return ErrClientConnClosing + return errConnClosing } - cc.state = Ready - cc.stateCV.Broadcast() - cc.transport = newTransport - if cc.ready != nil { - close(cc.ready) - cc.ready = nil + ac.state = Ready + ac.stateCV.Broadcast() + ac.transport = newTransport + if ac.ready != nil { + close(ac.ready) + ac.ready = nil } - cc.mu.Unlock() + ac.down = ac.cc.dopts.balancer.Up(ac.addr) + ac.mu.Unlock() return nil } } // Run in a goroutine to track the error in transport and create the // new transport if an error happens. It returns when the channel is closing. -func (cc *Conn) transportMonitor() { +func (ac *addrConn) transportMonitor() { for { + ac.mu.Lock() + t := ac.transport + ac.mu.Unlock() select { // shutdownChan is needed to detect the teardown when - // the ClientConn is idle (i.e., no RPC in flight). - case <-cc.shutdownChan: + // the addrConn is idle (i.e., no RPC in flight). + case <-ac.shutdownChan: return - case <-cc.transport.Error(): - cc.mu.Lock() - cc.state = TransientFailure - cc.stateCV.Broadcast() - cc.mu.Unlock() - if err := cc.resetTransport(true); err != nil { - // The ClientConn is closing. - cc.mu.Lock() - cc.printf("transport exiting: %v", err) - cc.mu.Unlock() - grpclog.Printf("grpc: ClientConn.transportMonitor exits due to: %v", err) + case <-t.Error(): + ac.mu.Lock() + if ac.state == Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return + } + ac.state = TransientFailure + ac.stateCV.Broadcast() + ac.mu.Unlock() + if err := ac.resetTransport(true); err != nil { + ac.mu.Lock() + ac.printf("transport exiting: %v", err) + ac.mu.Unlock() + grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err) return } - continue } } } -// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed. -func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) { +// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or +// iv) transport is in TransientFailure and the RPC is fail-fast. 
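+// When failFast is set, a transport in TransientFailure is reported immediately as
+// codes.Unavailable instead of blocking until a new transport becomes ready.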
+func (ac *addrConn) wait(ctx context.Context, failFast bool) (transport.ClientTransport, error) { for { - cc.mu.Lock() + ac.mu.Lock() switch { - case cc.state == Shutdown: - cc.mu.Unlock() - return nil, ErrClientConnClosing - case cc.state == Ready: - cc.mu.Unlock() - return cc.transport, nil + case ac.state == Shutdown: + ac.mu.Unlock() + return nil, errConnClosing + case ac.state == Ready: + ct := ac.transport + ac.mu.Unlock() + return ct, nil + case ac.state == TransientFailure && failFast: + ac.mu.Unlock() + return nil, Errorf(codes.Unavailable, "grpc: RPC failed fast due to transport failure") default: - ready := cc.ready + ready := ac.ready if ready == nil { ready = make(chan struct{}) - cc.ready = ready + ac.ready = ready } - cc.mu.Unlock() + ac.mu.Unlock() select { case <-ctx.Done(): - return nil, transport.ContextErr(ctx.Err()) + return nil, toRPCErr(ctx.Err()) // Wait until the new transport is ready or failed. case <-ready: } @@ -494,32 +679,46 @@ func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) { } } -// Close starts to tear down the Conn. Returns ErrClientConnClosing if -// it has been closed (mostly due to dial time-out). +// tearDown starts to tear down the addrConn. // TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many ClientConn's in a +// some edge cases (e.g., the caller opens and closes many addrConn's in a // tight loop. -func (cc *Conn) Close() error { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.state == Shutdown { - return ErrClientConnClosing - } - cc.state = Shutdown - cc.stateCV.Broadcast() - if cc.events != nil { - cc.events.Finish() - cc.events = nil - } - if cc.ready != nil { - close(cc.ready) - cc.ready = nil - } - if cc.transport != nil { - cc.transport.Close() +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + defer func() { + ac.mu.Unlock() + ac.cc.mu.Lock() + if ac.cc.conns != nil { + delete(ac.cc.conns, ac.addr) + } + ac.cc.mu.Unlock() + }() + if ac.state == Shutdown { + return + } + ac.state = Shutdown + if ac.down != nil { + ac.down(downErrorf(false, false, "%v", err)) + ac.down = nil + } + ac.stateCV.Broadcast() + if ac.events != nil { + ac.events.Finish() + ac.events = nil + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + if ac.transport != nil { + if err == errConnDrain { + ac.transport.GracefulClose() + } else { + ac.transport.Close() + } } - if cc.shutdownChan != nil { - close(cc.shutdownChan) + if ac.shutdownChan != nil { + close(ac.shutdownChan) } - return nil + return } diff --git a/vendor/google.golang.org/grpc/clientconn_test.go b/vendor/google.golang.org/grpc/clientconn_test.go new file mode 100644 index 00000000000..71ba45e6591 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn_test.go @@ -0,0 +1,127 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "testing" + "time" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/oauth" +) + +const tlsDir = "testdata/" + +func TestDialTimeout(t *testing.T) { + conn, err := Dial("Non-Existent.Server:80", WithTimeout(time.Millisecond), WithBlock(), WithInsecure()) + if err == nil { + conn.Close() + } + if err != ErrClientConnTimeout { + t.Fatalf("Dial(_, _) = %v, %v, want %v", conn, err, ErrClientConnTimeout) + } +} + +func TestTLSDialTimeout(t *testing.T) { + creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") + if err != nil { + t.Fatalf("Failed to create credentials %v", err) + } + conn, err := Dial("Non-Existent.Server:80", WithTransportCredentials(creds), WithTimeout(time.Millisecond), WithBlock()) + if err == nil { + conn.Close() + } + if err != ErrClientConnTimeout { + t.Fatalf("grpc.Dial(_, _) = %v, %v, want %v", conn, err, ErrClientConnTimeout) + } +} + +func TestCredentialsMisuse(t *testing.T) { + tlsCreds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") + if err != nil { + t.Fatalf("Failed to create authenticator %v", err) + } + // Two conflicting credential configurations + if _, err := Dial("Non-Existent.Server:80", WithTransportCredentials(tlsCreds), WithBlock(), WithInsecure()); err != errCredentialsConflict { + t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errCredentialsConflict) + } + rpcCreds, err := oauth.NewJWTAccessFromKey(nil) + if err != nil { + t.Fatalf("Failed to create credentials %v", err) + } + // security info on insecure connection + if _, err := Dial("Non-Existent.Server:80", WithPerRPCCredentials(rpcCreds), WithBlock(), WithInsecure()); err != errTransportCredentialsMissing { + t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errTransportCredentialsMissing) + } +} + +func TestWithBackoffConfigDefault(t *testing.T) { + testBackoffConfigSet(t, &DefaultBackoffConfig) +} + +func TestWithBackoffConfig(t *testing.T) { + b := BackoffConfig{MaxDelay: DefaultBackoffConfig.MaxDelay / 2} + expected := b + setDefaults(&expected) // defaults should be set + testBackoffConfigSet(t, &expected, WithBackoffConfig(b)) +} + +func TestWithBackoffMaxDelay(t *testing.T) { + md := DefaultBackoffConfig.MaxDelay / 2 + expected := BackoffConfig{MaxDelay: md} + setDefaults(&expected) + testBackoffConfigSet(t, &expected, WithBackoffMaxDelay(md)) +} + +func testBackoffConfigSet(t *testing.T, expected *BackoffConfig, opts ...DialOption) { + opts = append(opts, WithInsecure()) + conn, err := Dial("foo:80", opts...) 
+ if err != nil { + t.Fatalf("unexpected error dialing connection: %v", err) + } + + if conn.dopts.bs == nil { + t.Fatalf("backoff config not set") + } + + actual, ok := conn.dopts.bs.(BackoffConfig) + if !ok { + t.Fatalf("unexpected type of backoff config: %#v", conn.dopts.bs) + } + + if actual != *expected { + t.Fatalf("unexpected backoff config on connection: %v, want %v", actual, expected) + } + conn.Close() +} diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh new file mode 100755 index 00000000000..120235374a4 --- /dev/null +++ b/vendor/google.golang.org/grpc/coverage.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +set -e + +workdir=.cover +profile="$workdir/cover.out" +mode=set +end2endtest="google.golang.org/grpc/test" + +generate_cover_data() { + rm -rf "$workdir" + mkdir "$workdir" + + for pkg in "$@"; do + if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] + then + f="$workdir/$(echo $pkg | tr / -)" + go test -covermode="$mode" -coverprofile="$f.cover" "$pkg" + go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" + fi + done + + echo "mode: $mode" >"$profile" + grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" +} + +show_cover_report() { + go tool cover -${1}="$profile" +} + +push_to_coveralls() { + goveralls -coverprofile="$profile" +} + +generate_cover_data $(go list ./...) +show_cover_report func +case "$1" in +"") + ;; +--html) + show_cover_report html ;; +--coveralls) + push_to_coveralls ;; +*) + echo >&2 "error: invalid option: $1" ;; +esac +rm -rf "$workdir" diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index cde38dc4e4c..8d4c57ccf69 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -54,9 +54,9 @@ var ( alpnProtoStr = []string{"h2"} ) -// Credentials defines the common interface all supported credentials must -// implement. -type Credentials interface { +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { // GetRequestMetadata gets the current request metadata, refreshing // tokens if required. This should be called by the transport layer on // each request, and the data should be populated in headers or other @@ -66,7 +66,7 @@ type Credentials interface { // TODO(zhaoq): Define the set of the qualified keys instead of leaving // it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) - // RequireTransportSecurity indicates whether the credentails requires + // RequireTransportSecurity indicates whether the credentials requires // transport security. RequireTransportSecurity() bool } @@ -87,22 +87,9 @@ type AuthInfo interface { AuthType() string } -type authInfoKey struct{} - -// NewContext creates a new context with authInfo attached. -func NewContext(ctx context.Context, authInfo AuthInfo) context.Context { - return context.WithValue(ctx, authInfoKey{}, authInfo) -} - -// FromContext returns the authInfo in ctx if it exists. 
-func FromContext(ctx context.Context) (authInfo AuthInfo, ok bool) { - authInfo, ok = ctx.Value(authInfoKey{}).(AuthInfo) - return -} - -// TransportAuthenticator defines the common interface for all the live gRPC wire +// TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). -type TransportAuthenticator interface { +type TransportCredentials interface { // ClientHandshake does the authentication handshake specified by the corresponding // authentication protocol on rawConn for clients. It returns the authenticated // connection and the corresponding auth information about the connection. @@ -111,9 +98,8 @@ type TransportAuthenticator interface { // the authenticated connection and the corresponding auth information about // the connection. ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) - // Info provides the ProtocolInfo of this TransportAuthenticator. + // Info provides the ProtocolInfo of this TransportCredentials. Info() ProtocolInfo - Credentials } // TLSInfo contains the auth information for a TLS authenticated connection. @@ -122,6 +108,7 @@ type TLSInfo struct { State tls.ConnectionState } +// AuthType returns the type of TLSInfo as a string. func (t TLSInfo) AuthType() string { return "tls" } @@ -129,7 +116,7 @@ func (t TLSInfo) AuthType() string { // tlsCreds is the credentials required for authenticating a connection using TLS. type tlsCreds struct { // TLS configuration - config tls.Config + config *tls.Config } func (c tlsCreds) Info() ProtocolInfo { @@ -164,14 +151,16 @@ func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.D errChannel <- timeoutError{} }) } + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := *c.config if c.config.ServerName == "" { colonPos := strings.LastIndex(addr, ":") if colonPos == -1 { colonPos = len(addr) } - c.config.ServerName = addr[:colonPos] + cfg.ServerName = addr[:colonPos] } - conn := tls.Client(rawConn, &c.config) + conn := tls.Client(rawConn, &cfg) if timeout == 0 { err = conn.Handshake() } else { @@ -190,7 +179,7 @@ func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.D } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { - conn := tls.Server(rawConn, &c.config) + conn := tls.Server(rawConn, c.config) if err := conn.Handshake(); err != nil { rawConn.Close() return nil, nil, err @@ -198,20 +187,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) return conn, TLSInfo{conn.ConnectionState()}, nil } -// NewTLS uses c to construct a TransportAuthenticator based on TLS. -func NewTLS(c *tls.Config) TransportAuthenticator { - tc := &tlsCreds{*c} +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{c} tc.config.NextProtos = alpnProtoStr return tc } // NewClientTLSFromCert constructs a TLS from the input certificate for client. -func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator { +func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}) } // NewClientTLSFromFile constructs a TLS from the input certificate file for client. 
-func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) { +func NewClientTLSFromFile(certFile, serverName string) (TransportCredentials, error) { b, err := ioutil.ReadFile(certFile) if err != nil { return nil, err @@ -224,13 +213,13 @@ func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, } // NewServerTLSFromCert constructs a TLS from the input certificate for server. -func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator { +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) } // NewServerTLSFromFile constructs a TLS from the input certificate file and key // file for server. -func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) { +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { return nil, err diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 00000000000..8e68c4d73b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package oauth implements gRPC credentials using OAuth. +package oauth + +import ( + "fmt" + "io/ioutil" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. 
+func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +// RequireTransportSecurity indicates whether the credentails requires transport security. +func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +type jwtAccess struct { + jsonKey []byte +} + +// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. +func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. +func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.TokenType + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies PerRPCCredentials from a given token. +type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the PerRPCCredentials using a given token. +func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { + return oauthAccess{token: *token} +} + +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return map[string]string{ + "authorization": oa.token.TokenType + " " + oa.token.AccessToken, + }, nil +} + +func (oa oauthAccess) RequireTransportSecurity() bool { + return true +} + +// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from +// Google Compute Engine (GCE)'s metadata server. It is only valid to use this +// if your program is running on a GCE instance. +// TODO(dsymonds): Deprecate and remove this. +func NewComputeEngine() credentials.PerRPCCredentials { + return TokenSource{google.ComputeTokenSource("")} +} + +// serviceAccount represents PerRPCCredentials via JWT signing key. +type serviceAccount struct { + config *jwt.Config +} + +func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.TokenType + " " + token.AccessToken, + }, nil +} + +func (s serviceAccount) RequireTransportSecurity() bool { + return true +} + +// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice +// from a Google Developers service account. +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { + config, err := google.JWTConfigFromJSON(jsonKey, scope...) + if err != nil { + return nil, err + } + return serviceAccount{config: config}, nil +} + +// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file +// of a Google Developers service account. 
+func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewServiceAccountFromKey(jsonKey, scope...) +} + +// NewApplicationDefault returns "Application Default Credentials". For more +// detail, see https://developers.google.com/accounts/docs/application-default-credentials. +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { + t, err := google.DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return TokenSource{t}, nil +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index c63847745da..a35f218852b 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -1,6 +1,6 @@ /* Package grpc implements an RPC system called gRPC. -See https://github.com/grpc/grpc for more information about gRPC. +See www.grpc.io for more information about gRPC. */ package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/examples/README.md b/vendor/google.golang.org/grpc/examples/README.md new file mode 100644 index 00000000000..b65f8c5e632 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/README.md @@ -0,0 +1,57 @@ +gRPC in 3 minutes (Go) +====================== + +BACKGROUND +------------- +For this sample, we've already generated the server and client stubs from [helloworld.proto](helloworld/helloworld/helloworld.proto). + +PREREQUISITES +------------- + +- This requires Go 1.4 +- Requires that [GOPATH is set](https://golang.org/doc/code.html#GOPATH) + +``` +$ go help gopath +$ # ensure the PATH contains $GOPATH/bin +$ export PATH=$PATH:$GOPATH/bin +``` + +INSTALL +------- + +``` +$ go get -u google.golang.org/grpc/examples/helloworld/greeter_client +$ go get -u google.golang.org/grpc/examples/helloworld/greeter_server +``` + +TRY IT! +------- + +- Run the server + +``` +$ greeter_server & +``` + +- Run the client + +``` +$ greeter_client +``` + +OPTIONAL - Rebuilding the generated code +---------------------------------------- + +1 First [install protoc](https://github.com/google/protobuf/blob/master/README.md) + - For now, this needs to be installed from source + - This is will change once proto3 is officially released + +2 Install the protoc Go plugin. + +``` +$ go get -a github.com/golang/protobuf/protoc-gen-go +$ +$ # from this dir; invoke protoc +$ protoc -I ./helloworld/helloworld/ ./helloworld/helloworld/helloworld.proto --go_out=plugins=grpc:helloworld +``` diff --git a/vendor/google.golang.org/grpc/examples/gotutorial.md b/vendor/google.golang.org/grpc/examples/gotutorial.md new file mode 100644 index 00000000000..39833fdf3da --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/gotutorial.md @@ -0,0 +1,431 @@ +#gRPC Basics: Go + +This tutorial provides a basic Go programmer's introduction to working with gRPC. By walking through this example you'll learn how to: + +- Define a service in a .proto file. +- Generate server and client code using the protocol buffer compiler. +- Use the Go gRPC API to write a simple client and server for your service. + +It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers] (https://developers.google.com/protocol-buffers/docs/overview). 
Note that the example in this tutorial uses the proto3 version of the protocol buffers language, which is currently in alpha release:you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers Github repository. + +This isn't a comprehensive guide to using gRPC in Go: more reference documentation is coming soon. + +## Why use gRPC? + +Our example is a simple route mapping application that lets clients get information about features on their route, create a summary of their route, and exchange route information such as traffic updates with the server and other clients. + +With gRPC we can define our service once in a .proto file and implement clients and servers in any of gRPC's supported languages, which in turn can be run in environments ranging from servers inside Google to your own tablet - all the complexity of communication between different languages and environments is handled for you by gRPC. We also get all the advantages of working with protocol buffers, including efficient serialization, a simple IDL, and easy interface updating. + +## Example code and setup + +The example code for our tutorial is in [grpc/grpc-go/examples/route_guide](https://github.com/grpc/grpc-go/tree/master/examples/route_guide). To download the example, clone the `grpc-go` repository by running the following command: +```shell +$ go get google.golang.org/grpc +``` + +Then change your current directory to `grpc-go/examples/route_guide`: +```shell +$ cd $GOPATH/src/google.golang.org/grpc/examples/route_guide +``` + +You also should have the relevant tools installed to generate the server and client interface code - if you don't already, follow the setup instructions in [the Go quick start guide](examples/). + + +## Defining the service + +Our first step (as you'll know from the [quick start](http://www.grpc.io/docs/#quick-start)) is to define the gRPC *service* and the method *request* and *response* types using [protocol buffers] (https://developers.google.com/protocol-buffers/docs/overview). You can see the complete .proto file in [`examples/route_guide/proto/route_guide.proto`](examples/route_guide/proto/route_guide.proto). + +To define a service, you specify a named `service` in your .proto file: + +```proto +service RouteGuide { + ... +} +``` + +Then you define `rpc` methods inside your service definition, specifying their request and response types. gRPC lets you define four kinds of service method, all of which are used in the `RouteGuide` service: + +- A *simple RPC* where the client sends a request to the server using the stub and waits for a response to come back, just like a normal function call. +```proto + // Obtains the feature at a given position. + rpc GetFeature(Point) returns (Feature) {} +``` + +- A *server-side streaming RPC* where the client sends a request to the server and gets a stream to read a sequence of messages back. The client reads from the returned stream until there are no more messages. As you can see in our example, you specify a server-side streaming method by placing the `stream` keyword before the *response* type. +```proto + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. 
+ rpc ListFeatures(Rectangle) returns (stream Feature) {} +``` + +- A *client-side streaming RPC* where the client writes a sequence of messages and sends them to the server, again using a provided stream. Once the client has finished writing the messages, it waits for the server to read them all and return its response. You specify a client-side streaming method by placing the `stream` keyword before the *request* type. +```proto + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + rpc RecordRoute(stream Point) returns (RouteSummary) {} +``` + +- A *bidirectional streaming RPC* where both sides send a sequence of messages using a read-write stream. The two streams operate independently, so clients and servers can read and write in whatever order they like: for example, the server could wait to receive all the client messages before writing its responses, or it could alternately read a message then write a message, or some other combination of reads and writes. The order of messages in each stream is preserved. You specify this type of method by placing the `stream` keyword before both the request and the response. +```proto + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} +``` + +Our .proto file also contains protocol buffer message type definitions for all the request and response types used in our service methods - for example, here's the `Point` message type: +```proto +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +message Point { + int32 latitude = 1; + int32 longitude = 2; +} +``` + + +## Generating client and server code + +Next we need to generate the gRPC client and server interfaces from our .proto service definition. We do this using the protocol buffer compiler `protoc` with a special gRPC Go plugin. + +For simplicity, we've provided a [bash script](https://github.com/grpc/grpc-go/blob/master/codegen.sh) that runs `protoc` for you with the appropriate plugin, input, and output (if you want to run this by yourself, make sure you've installed protoc and followed the gRPC-Go [installation instructions](https://github.com/grpc/grpc-go/blob/master/README.md) first): + +```shell +$ codegen.sh route_guide.proto +``` + +which actually runs: + +```shell +$ protoc --go_out=plugins=grpc:. route_guide.proto +``` + +Running this command generates the following file in your current directory: +- `route_guide.pb.go` + +This contains: +- All the protocol buffer code to populate, serialize, and retrieve our request and response message types +- An interface type (or *stub*) for clients to call with the methods defined in the `RouteGuide` service. +- An interface type for servers to implement, also with the methods defined in the `RouteGuide` service. + + + +## Creating the server + +First let's look at how we create a `RouteGuide` server. If you're only interested in creating gRPC clients, you can skip this section and go straight to [Creating the client](#client) (though you might find it interesting anyway!). 
+ +There are two parts to making our `RouteGuide` service do its job: +- Implementing the service interface generated from our service definition: doing the actual "work" of our service. +- Running a gRPC server to listen for requests from clients and dispatch them to the right service implementation. + +You can find our example `RouteGuide` server in [grpc-go/examples/route_guide/server/server.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/server/server.go). Let's take a closer look at how it works. + +### Implementing RouteGuide + +As you can see, our server has a `routeGuideServer` struct type that implements the generated `RouteGuideServer` interface: + +```go +type routeGuideServer struct { + ... +} +... + +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { + ... +} +... + +func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { + ... +} +... + +func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { + ... +} +... + +func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { + ... +} +... +``` + +#### Simple RPC +`routeGuideServer` implements all our service methods. Let's look at the simplest type first, `GetFeature`, which just gets a `Point` from the client and returns the corresponding feature information from its database in a `Feature`. + +```go +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + return feature, nil + } + } + // No feature was found, return an unnamed feature + return &pb.Feature{"", point}, nil +} +``` + +The method is passed a context object for the RPC and the client's `Point` protocol buffer request. It returns a `Feature` protocol buffer object with the response information and an `error`. In the method we populate the `Feature` with the appropriate information, and then `return` it along with an `nil` error to tell gRPC that we've finished dealing with the RPC and that the `Feature` can be returned to the client. + +#### Server-side streaming RPC +Now let's look at one of our streaming RPCs. `ListFeatures` is a server-side streaming RPC, so we need to send back multiple `Feature`s to our client. + +```go +func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { + for _, feature := range s.savedFeatures { + if inRange(feature.Location, rect) { + if err := stream.Send(feature); err != nil { + return err + } + } + } + return nil +} +``` + +As you can see, instead of getting simple request and response objects in our method parameters, this time we get a request object (the `Rectangle` in which our client wants to find `Feature`s) and a special `RouteGuide_ListFeaturesServer` object to write our responses. + +In the method, we populate as many `Feature` objects as we need to return, writing them to the `RouteGuide_ListFeaturesServer` using its `Send()` method. Finally, as in our simple RPC, we return a `nil` error to tell gRPC that we've finished writing responses. Should any error happen in this call, we return a non-`nil` error; the gRPC layer will translate it into an appropriate RPC status to be sent on the wire. 
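As a concrete illustration of that last point, the sketch below (not part of the vendored tutorial) shows a handler rejecting an invalid request up front; the `Lo`/`Hi` field names, the `inRange` helper, and the use of `grpc.Errorf` with a `codes` value (from `google.golang.org/grpc` and `google.golang.org/grpc/codes`) are assumptions about this version of the API rather than code taken from the example:

```go
// Hypothetical variant of ListFeatures: returning a non-nil error ends the
// stream, and the gRPC layer reports it to the client as an RPC status.
// An error created with grpc.Errorf carries an explicit status code;
// returning a plain error is surfaced to the client as codes.Unknown.
func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error {
	if rect.Lo == nil || rect.Hi == nil {
		return grpc.Errorf(codes.InvalidArgument, "rectangle needs both lo and hi corners")
	}
	for _, feature := range s.savedFeatures {
		if inRange(feature.Location, rect) {
			if err := stream.Send(feature); err != nil {
				return err
			}
		}
	}
	return nil
}
```
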
+ +#### Client-side streaming RPC +Now let's look at something a little more complicated: the client-side streaming method `RecordRoute`, where we get a stream of `Point`s from the client and return a single `RouteSummary` with information about their trip. As you can see, this time the method doesn't have a request parameter at all. Instead, it gets a `RouteGuide_RecordRouteServer` stream, which the server can use to both read *and* write messages - it can receive client messages using its `Recv()` method and return its single response using its `SendAndClose()` method. + +```go +func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { + var pointCount, featureCount, distance int32 + var lastPoint *pb.Point + startTime := time.Now() + for { + point, err := stream.Recv() + if err == io.EOF { + endTime := time.Now() + return stream.SendAndClose(&pb.RouteSummary{ + PointCount: pointCount, + FeatureCount: featureCount, + Distance: distance, + ElapsedTime: int32(endTime.Sub(startTime).Seconds()), + }) + } + if err != nil { + return err + } + pointCount++ + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + featureCount++ + } + } + if lastPoint != nil { + distance += calcDistance(lastPoint, point) + } + lastPoint = point + } +} +``` + +In the method body we use the `RouteGuide_RecordRouteServer`s `Recv()` method to repeatedly read in our client's requests to a request object (in this case a `Point`) until there are no more messages: the server needs to check the the error returned from `Recv()` after each call. If this is `nil`, the stream is still good and it can continue reading; if it's `io.EOF` the message stream has ended and the server can return its `RouteSummary`. If it has any other value, we return the error "as is" so that it'll be translated to an RPC status by the gRPC layer. + +#### Bidirectional streaming RPC +Finally, let's look at our bidirectional streaming RPC `RouteChat()`. + +```go +func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + key := serialize(in.Location) + ... // look for notes to be sent to client + for _, note := range s.routeNotes[key] { + if err := stream.Send(note); err != nil { + return err + } + } + } +} +``` + +This time we get a `RouteGuide_RouteChatServer` stream that, as in our client-side streaming example, can be used to read and write messages. However, this time we return values via our method's stream while the client is still writing messages to *their* message stream. + +The syntax for reading and writing here is very similar to our client-streaming method, except the server uses the stream's `Send()` method rather than `SendAndClose()` because it's writing multiple responses. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently. + +### Starting the server + +Once we've implemented all our methods, we also need to start up a gRPC server so that clients can actually use our service. The following snippet shows how we do this for our `RouteGuide` service: + +```go +flag.Parse() +lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) +if err != nil { + log.Fatalf("failed to listen: %v", err) +} +grpcServer := grpc.NewServer() +pb.RegisterRouteGuideServer(grpcServer, &routeGuideServer{}) +... 
// determine whether to use TLS +grpcServer.Serve(lis) +``` +To build and start a server, we: + +1. Specify the port we want to use to listen for client requests using `lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))`. +2. Create an instance of the gRPC server using `grpc.NewServer()`. +3. Register our service implementation with the gRPC server. +4. Call `Serve()` on the server with our port details to do a blocking wait until the process is killed or `Stop()` is called. + + +## Creating the client + +In this section, we'll look at creating a Go client for our `RouteGuide` service. You can see our complete example client code in [grpc-go/examples/route_guide/client/client.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/client/client.go). + +### Creating a stub + +To call service methods, we first need to create a gRPC *channel* to communicate with the server. We create this by passing the server address and port number to `grpc.Dial()` as follows: + +```go +conn, err := grpc.Dial(*serverAddr) +if err != nil { + ... +} +defer conn.Close() +``` + +You can use `DialOptions` to set the auth credentials (e.g., TLS, GCE credentials, JWT credentials) in `grpc.Dial` if the service you request requires that - however, we don't need to do this for our `RouteGuide` service. + +Once the gRPC *channel* is setup, we need a client *stub* to perform RPCs. We get this using the `NewRouteGuideClient` method provided in the `pb` package we generated from our .proto. + +```go +client := pb.NewRouteGuideClient(conn) +``` + +### Calling service methods + +Now let's look at how we call our service methods. Note that in gRPC-Go, RPCs operate in a blocking/synchronous mode, which means that the RPC call waits for the server to respond, and will either return a response or an error. + +#### Simple RPC + +Calling the simple RPC `GetFeature` is nearly as straightforward as calling a local method. + +```go +feature, err := client.GetFeature(context.Background(), &pb.Point{409146138, -746188906}) +if err != nil { + ... +} +``` + +As you can see, we call the method on the stub we got earlier. In our method parameters we create and populate a request protocol buffer object (in our case `Point`). We also pass a `context.Context` object which lets us change our RPC's behaviour if necessary, such as time-out/cancel an RPC in flight. If the call doesn't return an error, then we can read the response information from the server from the first return value. + +```go +log.Println(feature) +``` + +#### Server-side streaming RPC + +Here's where we call the server-side streaming method `ListFeatures`, which returns a stream of geographical `Feature`s. If you've already read [Creating the server](#server) some of this may look very familiar - streaming RPCs are implemented in a similar way on both sides. + +```go +rect := &pb.Rectangle{ ... } // initialize a pb.Rectangle +stream, err := client.ListFeatures(context.Background(), rect) +if err != nil { + ... +} +for { + feature, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + } + log.Println(feature) +} +``` + +As in the simple RPC, we pass the method a context and a request. However, instead of getting a response object back, we get back an instance of `RouteGuide_ListFeaturesClient`. The client can use the `RouteGuide_ListFeaturesClient` stream to read the server's responses. 
+ +We use the `RouteGuide_ListFeaturesClient`'s `Recv()` method to repeatedly read in the server's responses to a response protocol buffer object (in this case a `Feature`) until there are no more messages: the client needs to check the error `err` returned from `Recv()` after each call. If `nil`, the stream is still good and it can continue reading; if it's `io.EOF` then the message stream has ended; otherwise there must be an RPC error, which is passed over through `err`. + +#### Client-side streaming RPC + +The client-side streaming method `RecordRoute` is similar to the server-side method, except that we only pass the method a context and get a `RouteGuide_RecordRouteClient` stream back, which we can use to both write *and* read messages. + +```go +// Create a random number of random points +r := rand.New(rand.NewSource(time.Now().UnixNano())) +pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points +var points []*pb.Point +for i := 0; i < pointCount; i++ { + points = append(points, randomPoint(r)) +} +log.Printf("Traversing %d points.", len(points)) +stream, err := client.RecordRoute(context.Background()) +if err != nil { + log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) +} +for _, point := range points { + if err := stream.Send(point); err != nil { + log.Fatalf("%v.Send(%v) = %v", stream, point, err) + } +} +reply, err := stream.CloseAndRecv() +if err != nil { + log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) +} +log.Printf("Route summary: %v", reply) +``` + +The `RouteGuide_RecordRouteClient` has a `Send()` method that we can use to send requests to the server. Once we've finished writing our client's requests to the stream using `Send()`, we need to call `CloseAndRecv()` on the stream to let gRPC know that we've finished writing and are expecting to receive a response. We get our RPC status from the `err` returned from `CloseAndRecv()`. If the status is `nil`, then the first return value from `CloseAndRecv()` will be a valid server response. + +#### Bidirectional streaming RPC + +Finally, let's look at our bidirectional streaming RPC `RouteChat()`. As in the case of `RecordRoute`, we only pass the method a context object and get back a stream that we can use to both write and read messages. However, this time we return values via our method's stream while the server is still writing messages to *their* message stream. + +```go +stream, err := client.RouteChat(context.Background()) +waitc := make(chan struct{}) +go func() { + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + close(waitc) + return + } + if err != nil { + log.Fatalf("Failed to receive a note : %v", err) + } + log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) + } +}() +for _, note := range notes { + if err := stream.Send(note); err != nil { + log.Fatalf("Failed to send a note: %v", err) + } +} +stream.CloseSend() +<-waitc +``` + +The syntax for reading and writing here is very similar to our client-side streaming method, except we use the stream's `CloseSend()` method once we've finished our call. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently. + +## Try it out! 
+ +To compile and run the server, assuming you are in the folder +`$GOPATH/src/google.golang.org/grpc/examples/route_guide`, simply: + +```sh +$ go run server/server.go +``` + +Likewise, to run the client: + +```sh +$ go run client/client.go +``` + diff --git a/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go b/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go new file mode 100644 index 00000000000..a451c99dc92 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go @@ -0,0 +1,69 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "log" + "os" + + "golang.org/x/net/context" + "google.golang.org/grpc" + pb "google.golang.org/grpc/examples/helloworld/helloworld" +) + +const ( + address = "localhost:50051" + defaultName = "world" +) + +func main() { + // Set up a connection to the server. + conn, err := grpc.Dial(address, grpc.WithInsecure()) + if err != nil { + log.Fatalf("did not connect: %v", err) + } + defer conn.Close() + c := pb.NewGreeterClient(conn) + + // Contact the server and print out its response. + name := defaultName + if len(os.Args) > 1 { + name = os.Args[1] + } + r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name}) + if err != nil { + log.Fatalf("could not greet: %v", err) + } + log.Printf("Greeting: %s", r.Message) +} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go b/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go new file mode 100644 index 00000000000..4dce9e07967 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "log" + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc" + pb "google.golang.org/grpc/examples/helloworld/helloworld" +) + +const ( + port = ":50051" +) + +// server is used to implement helloworld.GreeterServer. +type server struct{} + +// SayHello implements helloworld.GreeterServer +func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + return &pb.HelloReply{Message: "Hello " + in.Name}, nil +} + +func main() { + lis, err := net.Listen("tcp", port) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + s := grpc.NewServer() + pb.RegisterGreeterServer(s, &server{}) + s.Serve(lis) +} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go new file mode 100644 index 00000000000..0419f6af7c9 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go @@ -0,0 +1,151 @@ +// Code generated by protoc-gen-go. +// source: helloworld.proto +// DO NOT EDIT! + +/* +Package helloworld is a generated protocol buffer package. + +It is generated from these files: + helloworld.proto + +It has these top-level messages: + HelloRequest + HelloReply +*/ +package helloworld + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The request message containing the user's name. 
+type HelloRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *HelloRequest) Reset() { *m = HelloRequest{} } +func (m *HelloRequest) String() string { return proto.CompactTextString(m) } +func (*HelloRequest) ProtoMessage() {} +func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// The response message containing the greetings +type HelloReply struct { + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *HelloReply) Reset() { *m = HelloReply{} } +func (m *HelloReply) String() string { return proto.CompactTextString(m) } +func (*HelloReply) ProtoMessage() {} +func (*HelloReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func init() { + proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest") + proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for Greeter service + +type GreeterClient interface { + // Sends a greeting + SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) +} + +type greeterClient struct { + cc *grpc.ClientConn +} + +func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { + return &greeterClient{cc} +} + +func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { + out := new(HelloReply) + err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Greeter service + +type GreeterServer interface { + // Sends a greeting + SayHello(context.Context, *HelloRequest) (*HelloReply, error) +} + +func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { + s.RegisterService(&_Greeter_serviceDesc, srv) +} + +func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GreeterServer).SayHello(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/helloworld.Greeter/SayHello", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Greeter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "helloworld.Greeter", + HandlerType: (*GreeterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SayHello", + Handler: _Greeter_SayHello_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("helloworld.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 174 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9, + 0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, + 0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, + 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92, + 0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71, + 0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a, + 0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64, + 0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x00, 0xad, 0x50, 0x62, 0x70, 0x32, 0xe0, 0x92, 0xce, 0xcc, 0xd7, + 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x4b, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, 0x46, 0x52, 0xeb, + 0xc4, 0x0f, 0x56, 0x1c, 0x0e, 0x62, 0x07, 0x80, 0xbc, 0x14, 0xc0, 0x98, 0xc4, 0x06, 0xf6, 0x9b, + 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto new file mode 100644 index 00000000000..c3ddd4aee77 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto @@ -0,0 +1,52 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "io.grpc.examples.helloworld"; +option java_outer_classname = "HelloWorldProto"; + +package helloworld; + +// The greeting service definition. +service Greeter { + // Sends a greeting + rpc SayHello (HelloRequest) returns (HelloReply) {} +} + +// The request message containing the user's name. +message HelloRequest { + string name = 1; +} + +// The response message containing the greetings +message HelloReply { + string message = 1; +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/README.md b/vendor/google.golang.org/grpc/examples/route_guide/README.md new file mode 100644 index 00000000000..a7c0c2b0e53 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/README.md @@ -0,0 +1,35 @@ +# Description +The route guide server and client demonstrate how to use grpc go libraries to +perform unary, client streaming, server streaming and full duplex RPCs. + +Please refer to [gRPC Basics: Go] (http://www.grpc.io/docs/tutorials/basic/go.html) for more information. + +See the definition of the route guide service in proto/route_guide.proto. + +# Run the sample code +To compile and run the server, assuming you are in the root of the route_guide +folder, i.e., .../examples/route_guide/, simply: + +```sh +$ go run server/server.go +``` + +Likewise, to run the client: + +```sh +$ go run client/client.go +``` + +# Optional command line flags +The server and client both take optional command line flags. For example, the +client and server run without TLS by default. To enable TLS: + +```sh +$ go run server/server.go -tls=true +``` + +and + +```sh +$ go run client/client.go -tls=true +``` diff --git a/vendor/google.golang.org/grpc/examples/route_guide/client/client.go b/vendor/google.golang.org/grpc/examples/route_guide/client/client.go new file mode 100644 index 00000000000..02c049097d9 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/client/client.go @@ -0,0 +1,202 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package main implements a simple gRPC client that demonstrates how to use gRPC-Go libraries +// to perform unary, client streaming, server streaming and full duplex RPCs. +// +// It interacts with the route guide service whose definition can be found in proto/route_guide.proto. +package main + +import ( + "flag" + "io" + "math/rand" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + pb "google.golang.org/grpc/examples/route_guide/routeguide" + "google.golang.org/grpc/grpclog" +) + +var ( + tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") + caFile = flag.String("ca_file", "testdata/ca.pem", "The file containning the CA root cert file") + serverAddr = flag.String("server_addr", "127.0.0.1:10000", "The server address in the format of host:port") + serverHostOverride = flag.String("server_host_override", "x.test.youtube.com", "The server name use to verify the hostname returned by TLS handshake") +) + +// printFeature gets the feature for the given point. +func printFeature(client pb.RouteGuideClient, point *pb.Point) { + grpclog.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude) + feature, err := client.GetFeature(context.Background(), point) + if err != nil { + grpclog.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) + } + grpclog.Println(feature) +} + +// printFeatures lists all the features within the given bounding Rectangle. +func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { + grpclog.Printf("Looking for features within %v", rect) + stream, err := client.ListFeatures(context.Background(), rect) + if err != nil { + grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + } + for { + feature, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + } + grpclog.Println(feature) + } +} + +// runRecordRoute sends a sequence of points to server and expects to get a RouteSummary from server. 
+func runRecordRoute(client pb.RouteGuideClient) { + // Create a random number of random points + r := rand.New(rand.NewSource(time.Now().UnixNano())) + pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points + var points []*pb.Point + for i := 0; i < pointCount; i++ { + points = append(points, randomPoint(r)) + } + grpclog.Printf("Traversing %d points.", len(points)) + stream, err := client.RecordRoute(context.Background()) + if err != nil { + grpclog.Fatalf("%v.RecordRoute(_) = _, %v", client, err) + } + for _, point := range points { + if err := stream.Send(point); err != nil { + grpclog.Fatalf("%v.Send(%v) = %v", stream, point, err) + } + } + reply, err := stream.CloseAndRecv() + if err != nil { + grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + } + grpclog.Printf("Route summary: %v", reply) +} + +// runRouteChat receives a sequence of route notes, while sending notes for various locations. +func runRouteChat(client pb.RouteGuideClient) { + notes := []*pb.RouteNote{ + {&pb.Point{Latitude: 0, Longitude: 1}, "First message"}, + {&pb.Point{Latitude: 0, Longitude: 2}, "Second message"}, + {&pb.Point{Latitude: 0, Longitude: 3}, "Third message"}, + {&pb.Point{Latitude: 0, Longitude: 1}, "Fourth message"}, + {&pb.Point{Latitude: 0, Longitude: 2}, "Fifth message"}, + {&pb.Point{Latitude: 0, Longitude: 3}, "Sixth message"}, + } + stream, err := client.RouteChat(context.Background()) + if err != nil { + grpclog.Fatalf("%v.RouteChat(_) = _, %v", client, err) + } + waitc := make(chan struct{}) + go func() { + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + close(waitc) + return + } + if err != nil { + grpclog.Fatalf("Failed to receive a note : %v", err) + } + grpclog.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) + } + }() + for _, note := range notes { + if err := stream.Send(note); err != nil { + grpclog.Fatalf("Failed to send a note: %v", err) + } + } + stream.CloseSend() + <-waitc +} + +func randomPoint(r *rand.Rand) *pb.Point { + lat := (r.Int31n(180) - 90) * 1e7 + long := (r.Int31n(360) - 180) * 1e7 + return &pb.Point{lat, long} +} + +func main() { + flag.Parse() + var opts []grpc.DialOption + if *tls { + var sn string + if *serverHostOverride != "" { + sn = *serverHostOverride + } + var creds credentials.TransportCredentials + if *caFile != "" { + var err error + creds, err = credentials.NewClientTLSFromFile(*caFile, sn) + if err != nil { + grpclog.Fatalf("Failed to create TLS credentials %v", err) + } + } else { + creds = credentials.NewClientTLSFromCert(nil, sn) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + conn, err := grpc.Dial(*serverAddr, opts...) + if err != nil { + grpclog.Fatalf("fail to dial: %v", err) + } + defer conn.Close() + client := pb.NewRouteGuideClient(conn) + + // Looking for a valid feature + printFeature(client, &pb.Point{409146138, -746188906}) + + // Feature missing. + printFeature(client, &pb.Point{0, 0}) + + // Looking for features between 40, -75 and 42, -73. 
+ printFeatures(client, &pb.Rectangle{&pb.Point{Latitude: 400000000, Longitude: -750000000}, &pb.Point{Latitude: 420000000, Longitude: -730000000}}) + + // RecordRoute + runRecordRoute(client) + + // RouteChat + runRouteChat(client) +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go new file mode 100644 index 00000000000..9bb1d6052d2 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go @@ -0,0 +1,488 @@ +// Code generated by protoc-gen-go. +// source: route_guide.proto +// DO NOT EDIT! + +/* +Package routeguide is a generated protocol buffer package. + +It is generated from these files: + route_guide.proto + +It has these top-level messages: + Point + Rectangle + Feature + RouteNote + RouteSummary +*/ +package routeguide + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +type Point struct { + Latitude int32 `protobuf:"varint,1,opt,name=latitude" json:"latitude,omitempty"` + Longitude int32 `protobuf:"varint,2,opt,name=longitude" json:"longitude,omitempty"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// A latitude-longitude rectangle, represented as two diagonally opposite +// points "lo" and "hi". +type Rectangle struct { + // One corner of the rectangle. + Lo *Point `protobuf:"bytes,1,opt,name=lo" json:"lo,omitempty"` + // The other corner of the rectangle. + Hi *Point `protobuf:"bytes,2,opt,name=hi" json:"hi,omitempty"` +} + +func (m *Rectangle) Reset() { *m = Rectangle{} } +func (m *Rectangle) String() string { return proto.CompactTextString(m) } +func (*Rectangle) ProtoMessage() {} +func (*Rectangle) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Rectangle) GetLo() *Point { + if m != nil { + return m.Lo + } + return nil +} + +func (m *Rectangle) GetHi() *Point { + if m != nil { + return m.Hi + } + return nil +} + +// A feature names something at a given point. +// +// If a feature could not be named, the name is empty. +type Feature struct { + // The name of the feature. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The point where the feature is detected. 
+ Location *Point `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"` +} + +func (m *Feature) Reset() { *m = Feature{} } +func (m *Feature) String() string { return proto.CompactTextString(m) } +func (*Feature) ProtoMessage() {} +func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Feature) GetLocation() *Point { + if m != nil { + return m.Location + } + return nil +} + +// A RouteNote is a message sent while at a given point. +type RouteNote struct { + // The location from which the message is sent. + Location *Point `protobuf:"bytes,1,opt,name=location" json:"location,omitempty"` + // The message to be sent. + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *RouteNote) Reset() { *m = RouteNote{} } +func (m *RouteNote) String() string { return proto.CompactTextString(m) } +func (*RouteNote) ProtoMessage() {} +func (*RouteNote) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *RouteNote) GetLocation() *Point { + if m != nil { + return m.Location + } + return nil +} + +// A RouteSummary is received in response to a RecordRoute rpc. +// +// It contains the number of individual points received, the number of +// detected features, and the total distance covered as the cumulative sum of +// the distance between each point. +type RouteSummary struct { + // The number of points received. + PointCount int32 `protobuf:"varint,1,opt,name=point_count,json=pointCount" json:"point_count,omitempty"` + // The number of known features passed while traversing the route. + FeatureCount int32 `protobuf:"varint,2,opt,name=feature_count,json=featureCount" json:"feature_count,omitempty"` + // The distance covered in metres. + Distance int32 `protobuf:"varint,3,opt,name=distance" json:"distance,omitempty"` + // The duration of the traversal in seconds. + ElapsedTime int32 `protobuf:"varint,4,opt,name=elapsed_time,json=elapsedTime" json:"elapsed_time,omitempty"` +} + +func (m *RouteSummary) Reset() { *m = RouteSummary{} } +func (m *RouteSummary) String() string { return proto.CompactTextString(m) } +func (*RouteSummary) ProtoMessage() {} +func (*RouteSummary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func init() { + proto.RegisterType((*Point)(nil), "routeguide.Point") + proto.RegisterType((*Rectangle)(nil), "routeguide.Rectangle") + proto.RegisterType((*Feature)(nil), "routeguide.Feature") + proto.RegisterType((*RouteNote)(nil), "routeguide.RouteNote") + proto.RegisterType((*RouteSummary)(nil), "routeguide.RouteSummary") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for RouteGuide service + +type RouteGuideClient interface { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // A feature with an empty name is returned if there's no feature at the given + // position. + GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. 
+ ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) +} + +type routeGuideClient struct { + cc *grpc.ClientConn +} + +func NewRouteGuideClient(cc *grpc.ClientConn) RouteGuideClient { + return &routeGuideClient{cc} +} + +func (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) { + out := new(Feature) + err := grpc.Invoke(ctx, "/routeguide.RouteGuide/GetFeature", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *routeGuideClient) ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[0], c.cc, "/routeguide.RouteGuide/ListFeatures", opts...) + if err != nil { + return nil, err + } + x := &routeGuideListFeaturesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type RouteGuide_ListFeaturesClient interface { + Recv() (*Feature, error) + grpc.ClientStream +} + +type routeGuideListFeaturesClient struct { + grpc.ClientStream +} + +func (x *routeGuideListFeaturesClient) Recv() (*Feature, error) { + m := new(Feature) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *routeGuideClient) RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[1], c.cc, "/routeguide.RouteGuide/RecordRoute", opts...) + if err != nil { + return nil, err + } + x := &routeGuideRecordRouteClient{stream} + return x, nil +} + +type RouteGuide_RecordRouteClient interface { + Send(*Point) error + CloseAndRecv() (*RouteSummary, error) + grpc.ClientStream +} + +type routeGuideRecordRouteClient struct { + grpc.ClientStream +} + +func (x *routeGuideRecordRouteClient) Send(m *Point) error { + return x.ClientStream.SendMsg(m) +} + +func (x *routeGuideRecordRouteClient) CloseAndRecv() (*RouteSummary, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(RouteSummary) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *routeGuideClient) RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[2], c.cc, "/routeguide.RouteGuide/RouteChat", opts...) 
+ if err != nil { + return nil, err + } + x := &routeGuideRouteChatClient{stream} + return x, nil +} + +type RouteGuide_RouteChatClient interface { + Send(*RouteNote) error + Recv() (*RouteNote, error) + grpc.ClientStream +} + +type routeGuideRouteChatClient struct { + grpc.ClientStream +} + +func (x *routeGuideRouteChatClient) Send(m *RouteNote) error { + return x.ClientStream.SendMsg(m) +} + +func (x *routeGuideRouteChatClient) Recv() (*RouteNote, error) { + m := new(RouteNote) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for RouteGuide service + +type RouteGuideServer interface { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // A feature with an empty name is returned if there's no feature at the given + // position. + GetFeature(context.Context, *Point) (*Feature, error) + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + ListFeatures(*Rectangle, RouteGuide_ListFeaturesServer) error + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + RecordRoute(RouteGuide_RecordRouteServer) error + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + RouteChat(RouteGuide_RouteChatServer) error +} + +func RegisterRouteGuideServer(s *grpc.Server, srv RouteGuideServer) { + s.RegisterService(&_RouteGuide_serviceDesc, srv) +} + +func _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Point) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteGuideServer).GetFeature(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/routeguide.RouteGuide/GetFeature", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteGuideServer).GetFeature(ctx, req.(*Point)) + } + return interceptor(ctx, in, info, handler) +} + +func _RouteGuide_ListFeatures_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Rectangle) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RouteGuideServer).ListFeatures(m, &routeGuideListFeaturesServer{stream}) +} + +type RouteGuide_ListFeaturesServer interface { + Send(*Feature) error + grpc.ServerStream +} + +type routeGuideListFeaturesServer struct { + grpc.ServerStream +} + +func (x *routeGuideListFeaturesServer) Send(m *Feature) error { + return x.ServerStream.SendMsg(m) +} + +func _RouteGuide_RecordRoute_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RouteGuideServer).RecordRoute(&routeGuideRecordRouteServer{stream}) +} + +type RouteGuide_RecordRouteServer interface { + SendAndClose(*RouteSummary) error + Recv() (*Point, error) + grpc.ServerStream +} + +type routeGuideRecordRouteServer struct { + grpc.ServerStream +} + +func (x *routeGuideRecordRouteServer) SendAndClose(m *RouteSummary) error { + return x.ServerStream.SendMsg(m) +} + +func (x *routeGuideRecordRouteServer) Recv() (*Point, error) { + m := new(Point) 
+ if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _RouteGuide_RouteChat_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RouteGuideServer).RouteChat(&routeGuideRouteChatServer{stream}) +} + +type RouteGuide_RouteChatServer interface { + Send(*RouteNote) error + Recv() (*RouteNote, error) + grpc.ServerStream +} + +type routeGuideRouteChatServer struct { + grpc.ServerStream +} + +func (x *routeGuideRouteChatServer) Send(m *RouteNote) error { + return x.ServerStream.SendMsg(m) +} + +func (x *routeGuideRouteChatServer) Recv() (*RouteNote, error) { + m := new(RouteNote) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _RouteGuide_serviceDesc = grpc.ServiceDesc{ + ServiceName: "routeguide.RouteGuide", + HandlerType: (*RouteGuideServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetFeature", + Handler: _RouteGuide_GetFeature_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListFeatures", + Handler: _RouteGuide_ListFeatures_Handler, + ServerStreams: true, + }, + { + StreamName: "RecordRoute", + Handler: _RouteGuide_RecordRoute_Handler, + ClientStreams: true, + }, + { + StreamName: "RouteChat", + Handler: _RouteGuide_RouteChat_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("route_guide.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 404 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0xc1, 0x4a, 0xeb, 0x40, + 0x14, 0xed, 0xf4, 0xb5, 0xaf, 0x2f, 0x37, 0x79, 0x3c, 0x3a, 0x0f, 0x21, 0x54, 0x41, 0x8d, 0x9b, + 0x6e, 0x0c, 0xa5, 0x82, 0x4b, 0xc5, 0x16, 0xec, 0xa6, 0x48, 0x8d, 0xdd, 0x97, 0x31, 0x19, 0xd3, + 0x81, 0x24, 0x13, 0x92, 0x09, 0xe8, 0x07, 0xf8, 0x05, 0xfe, 0xb0, 0x93, 0xc9, 0xa4, 0x4d, 0xb5, + 0xc5, 0x5d, 0xe6, 0xdc, 0x73, 0xee, 0x3d, 0xf7, 0x5c, 0x02, 0xfd, 0x8c, 0x17, 0x82, 0xae, 0xc2, + 0x82, 0x05, 0xd4, 0x4d, 0x33, 0x2e, 0x38, 0x06, 0x05, 0x29, 0xc4, 0xb9, 0x83, 0xee, 0x82, 0xb3, + 0x44, 0xe0, 0x01, 0xfc, 0x89, 0x88, 0x60, 0xa2, 0x08, 0xa8, 0x8d, 0xce, 0xd0, 0xb0, 0xeb, 0x6d, + 0xde, 0xf8, 0x04, 0x8c, 0x88, 0x27, 0x61, 0x55, 0x6c, 0xab, 0xe2, 0x16, 0x70, 0x1e, 0xc1, 0xf0, + 0xa8, 0x2f, 0x48, 0x12, 0x46, 0x14, 0x9f, 0x43, 0x3b, 0xe2, 0xaa, 0x81, 0x39, 0xee, 0xbb, 0xdb, + 0x41, 0xae, 0x9a, 0xe2, 0xc9, 0x62, 0x49, 0x59, 0x33, 0xd5, 0x66, 0x3f, 0x65, 0xcd, 0x9c, 0x39, + 0xf4, 0xee, 0x29, 0x11, 0x45, 0x46, 0x31, 0x86, 0x4e, 0x42, 0xe2, 0xca, 0x93, 0xe1, 0xa9, 0x6f, + 0x7c, 0x29, 0xbd, 0x72, 0x5f, 0xba, 0xe3, 0xc9, 0xe1, 0x3e, 0x1b, 0x8a, 0xb3, 0x94, 0x06, 0xcb, + 0xea, 0x03, 0x17, 0xbb, 0x5a, 0xf4, 0xa3, 0x16, 0xdb, 0xd0, 0x8b, 0x69, 0x9e, 0x93, 0xb0, 0x5a, + 0xdc, 0xf0, 0xea, 0xa7, 0xf3, 0x81, 0xc0, 0x52, 0x6d, 0x9f, 0x8a, 0x38, 0x26, 0xd9, 0x1b, 0x3e, + 0x05, 0x33, 0x2d, 0xd5, 0x2b, 0x9f, 0x17, 0x89, 0xd0, 0x21, 0x82, 0x82, 0xa6, 0x25, 0x82, 0x2f, + 0xe0, 0xef, 0x4b, 0xb5, 0x95, 0xa6, 0x54, 0x51, 0x5a, 0x1a, 0xac, 0x48, 0xf2, 0x0e, 0x01, 0xcb, + 0x65, 0x9a, 0x3e, 0xb5, 0x7f, 0x55, 0x77, 0xa8, 0xdf, 0x32, 0x39, 0x8b, 0x46, 0x24, 0xcd, 0x69, + 0xb0, 0x12, 0x4c, 0x66, 0xd2, 0x51, 0x75, 0x53, 0x63, 0x4b, 0x09, 0x8d, 0xdf, 0xdb, 0x00, 0xca, + 0xd5, 0xac, 0x5c, 0x07, 0x5f, 0x03, 0xcc, 0xa8, 0xa8, 0xb3, 0xfc, 0xbe, 0xe9, 0xe0, 0x7f, 0x13, + 0xd2, 0x3c, 0xa7, 0x85, 0x6f, 0xc0, 0x9a, 0xcb, 0xa9, 0x1a, 0xc8, 0xf1, 0x51, 0x93, 0xb6, 0xb9, + 0xf6, 0x01, 0xf5, 0x08, 0x49, 0xbd, 0x29, 
0x59, 0x3c, 0x0b, 0x94, 0x97, 0x7d, 0x83, 0xed, 0x9d, + 0x8e, 0x8d, 0x1c, 0x9d, 0xd6, 0x10, 0xe1, 0x5b, 0x7d, 0xb2, 0xe9, 0x9a, 0x88, 0x2f, 0xc3, 0xeb, + 0x4b, 0x0e, 0xf6, 0xc3, 0xa5, 0x7c, 0x84, 0x26, 0x23, 0x38, 0x66, 0xdc, 0x0d, 0xb3, 0xd4, 0x77, + 0xe9, 0x2b, 0x89, 0xd3, 0x88, 0xe6, 0x0d, 0xfa, 0xe4, 0xdf, 0x36, 0xa3, 0x45, 0xf9, 0x4f, 0x2c, + 0xd0, 0xf3, 0x6f, 0xf5, 0x73, 0x5c, 0x7d, 0x06, 0x00, 0x00, 0xff, 0xff, 0xc8, 0xe4, 0xef, 0xe6, + 0x31, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto new file mode 100644 index 00000000000..5a782aa273b --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto @@ -0,0 +1,125 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "io.grpc.examples.routeguide"; +option java_outer_classname = "RouteGuideProto"; + +package routeguide; + +// Interface exported by the server. +service RouteGuide { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // A feature with an empty name is returned if there's no feature at the given + // position. + rpc GetFeature(Point) returns (Feature) {} + + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + rpc ListFeatures(Rectangle) returns (stream Feature) {} + + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + rpc RecordRoute(stream Point) returns (RouteSummary) {} + + // A Bidirectional streaming RPC. 
+ // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} +} + +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +message Point { + int32 latitude = 1; + int32 longitude = 2; +} + +// A latitude-longitude rectangle, represented as two diagonally opposite +// points "lo" and "hi". +message Rectangle { + // One corner of the rectangle. + Point lo = 1; + + // The other corner of the rectangle. + Point hi = 2; +} + +// A feature names something at a given point. +// +// If a feature could not be named, the name is empty. +message Feature { + // The name of the feature. + string name = 1; + + // The point where the feature is detected. + Point location = 2; +} + +// A RouteNote is a message sent while at a given point. +message RouteNote { + // The location from which the message is sent. + Point location = 1; + + // The message to be sent. + string message = 2; +} + +// A RouteSummary is received in response to a RecordRoute rpc. +// +// It contains the number of individual points received, the number of +// detected features, and the total distance covered as the cumulative sum of +// the distance between each point. +message RouteSummary { + // The number of points received. + int32 point_count = 1; + + // The number of known features passed while traversing the route. + int32 feature_count = 2; + + // The distance covered in metres. + int32 distance = 3; + + // The duration of the traversal in seconds. + int32 elapsed_time = 4; +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/server/server.go b/vendor/google.golang.org/grpc/examples/route_guide/server/server.go new file mode 100644 index 00000000000..c8be4970943 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/server/server.go @@ -0,0 +1,239 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package main implements a simple gRPC server that demonstrates how to use gRPC-Go libraries +// to perform unary, client streaming, server streaming and full duplex RPCs. +// +// It implements the route guide service whose definition can be found in proto/route_guide.proto. +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "math" + "net" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + + "github.com/golang/protobuf/proto" + + pb "google.golang.org/grpc/examples/route_guide/routeguide" +) + +var ( + tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") + certFile = flag.String("cert_file", "testdata/server1.pem", "The TLS cert file") + keyFile = flag.String("key_file", "testdata/server1.key", "The TLS key file") + jsonDBFile = flag.String("json_db_file", "testdata/route_guide_db.json", "A json file containing a list of features") + port = flag.Int("port", 10000, "The server port") +) + +type routeGuideServer struct { + savedFeatures []*pb.Feature + routeNotes map[string][]*pb.RouteNote +} + +// GetFeature returns the feature at the given point. +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + return feature, nil + } + } + // No feature was found, return an unnamed feature + return &pb.Feature{"", point}, nil +} + +// ListFeatures lists all features contained within the given bounding Rectangle. +func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { + for _, feature := range s.savedFeatures { + if inRange(feature.Location, rect) { + if err := stream.Send(feature); err != nil { + return err + } + } + } + return nil +} + +// RecordRoute records a route composited of a sequence of points. +// +// It gets a stream of points, and responds with statistics about the "trip": +// number of points, number of known features visited, total distance traveled, and +// total time spent. 
+func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { + var pointCount, featureCount, distance int32 + var lastPoint *pb.Point + startTime := time.Now() + for { + point, err := stream.Recv() + if err == io.EOF { + endTime := time.Now() + return stream.SendAndClose(&pb.RouteSummary{ + PointCount: pointCount, + FeatureCount: featureCount, + Distance: distance, + ElapsedTime: int32(endTime.Sub(startTime).Seconds()), + }) + } + if err != nil { + return err + } + pointCount++ + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + featureCount++ + } + } + if lastPoint != nil { + distance += calcDistance(lastPoint, point) + } + lastPoint = point + } +} + +// RouteChat receives a stream of message/location pairs, and responds with a stream of all +// previous messages at each of those locations. +func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + key := serialize(in.Location) + if _, present := s.routeNotes[key]; !present { + s.routeNotes[key] = []*pb.RouteNote{in} + } else { + s.routeNotes[key] = append(s.routeNotes[key], in) + } + for _, note := range s.routeNotes[key] { + if err := stream.Send(note); err != nil { + return err + } + } + } +} + +// loadFeatures loads features from a JSON file. +func (s *routeGuideServer) loadFeatures(filePath string) { + file, err := ioutil.ReadFile(filePath) + if err != nil { + grpclog.Fatalf("Failed to load default features: %v", err) + } + if err := json.Unmarshal(file, &s.savedFeatures); err != nil { + grpclog.Fatalf("Failed to load default features: %v", err) + } +} + +func toRadians(num float64) float64 { + return num * math.Pi / float64(180) +} + +// calcDistance calculates the distance between two points using the "haversine" formula. +// This code was taken from http://www.movable-type.co.uk/scripts/latlong.html. 
+func calcDistance(p1 *pb.Point, p2 *pb.Point) int32 { + const CordFactor float64 = 1e7 + const R float64 = float64(6371000) // metres + lat1 := float64(p1.Latitude) / CordFactor + lat2 := float64(p2.Latitude) / CordFactor + lng1 := float64(p1.Longitude) / CordFactor + lng2 := float64(p2.Longitude) / CordFactor + φ1 := toRadians(lat1) + φ2 := toRadians(lat2) + Δφ := toRadians(lat2 - lat1) + Δλ := toRadians(lng2 - lng1) + + a := math.Sin(Δφ/2)*math.Sin(Δφ/2) + + math.Cos(φ1)*math.Cos(φ2)* + math.Sin(Δλ/2)*math.Sin(Δλ/2) + c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a)) + + distance := R * c + return int32(distance) +} + +func inRange(point *pb.Point, rect *pb.Rectangle) bool { + left := math.Min(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) + right := math.Max(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) + top := math.Max(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) + bottom := math.Min(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) + + if float64(point.Longitude) >= left && + float64(point.Longitude) <= right && + float64(point.Latitude) >= bottom && + float64(point.Latitude) <= top { + return true + } + return false +} + +func serialize(point *pb.Point) string { + return fmt.Sprintf("%d %d", point.Latitude, point.Longitude) +} + +func newServer() *routeGuideServer { + s := new(routeGuideServer) + s.loadFeatures(*jsonDBFile) + s.routeNotes = make(map[string][]*pb.RouteNote) + return s +} + +func main() { + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + grpclog.Fatalf("failed to listen: %v", err) + } + var opts []grpc.ServerOption + if *tls { + creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) + if err != nil { + grpclog.Fatalf("Failed to generate credentials %v", err) + } + opts = []grpc.ServerOption{grpc.Creds(creds)} + } + grpcServer := grpc.NewServer(opts...) 
+ pb.RegisterRouteGuideServer(grpcServer, newServer()) + grpcServer.Serve(lis) +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/testdata/ca.pem b/vendor/google.golang.org/grpc/examples/route_guide/testdata/ca.pem new file mode 100644 index 00000000000..6c8511a73c6 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/testdata/ca.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla +Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 +YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT +BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 ++L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu +g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd +Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau +sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m +oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG +Dfcog5wrJytaQ6UA0wE= +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json b/vendor/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json new file mode 100644 index 00000000000..9d6a980ab7d --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json @@ -0,0 +1,601 @@ +[{ + "location": { + "latitude": 407838351, + "longitude": -746143763 + }, + "name": "Patriots Path, Mendham, NJ 07945, USA" +}, { + "location": { + "latitude": 408122808, + "longitude": -743999179 + }, + "name": "101 New Jersey 10, Whippany, NJ 07981, USA" +}, { + "location": { + "latitude": 413628156, + "longitude": -749015468 + }, + "name": "U.S. 
6, Shohola, PA 18458, USA" +}, { + "location": { + "latitude": 419999544, + "longitude": -740371136 + }, + "name": "5 Conners Road, Kingston, NY 12401, USA" +}, { + "location": { + "latitude": 414008389, + "longitude": -743951297 + }, + "name": "Mid Hudson Psychiatric Center, New Hampton, NY 10958, USA" +}, { + "location": { + "latitude": 419611318, + "longitude": -746524769 + }, + "name": "287 Flugertown Road, Livingston Manor, NY 12758, USA" +}, { + "location": { + "latitude": 406109563, + "longitude": -742186778 + }, + "name": "4001 Tremley Point Road, Linden, NJ 07036, USA" +}, { + "location": { + "latitude": 416802456, + "longitude": -742370183 + }, + "name": "352 South Mountain Road, Wallkill, NY 12589, USA" +}, { + "location": { + "latitude": 412950425, + "longitude": -741077389 + }, + "name": "Bailey Turn Road, Harriman, NY 10926, USA" +}, { + "location": { + "latitude": 412144655, + "longitude": -743949739 + }, + "name": "193-199 Wawayanda Road, Hewitt, NJ 07421, USA" +}, { + "location": { + "latitude": 415736605, + "longitude": -742847522 + }, + "name": "406-496 Ward Avenue, Pine Bush, NY 12566, USA" +}, { + "location": { + "latitude": 413843930, + "longitude": -740501726 + }, + "name": "162 Merrill Road, Highland Mills, NY 10930, USA" +}, { + "location": { + "latitude": 410873075, + "longitude": -744459023 + }, + "name": "Clinton Road, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 412346009, + "longitude": -744026814 + }, + "name": "16 Old Brook Lane, Warwick, NY 10990, USA" +}, { + "location": { + "latitude": 402948455, + "longitude": -747903913 + }, + "name": "3 Drake Lane, Pennington, NJ 08534, USA" +}, { + "location": { + "latitude": 406337092, + "longitude": -740122226 + }, + "name": "6324 8th Avenue, Brooklyn, NY 11220, USA" +}, { + "location": { + "latitude": 406421967, + "longitude": -747727624 + }, + "name": "1 Merck Access Road, Whitehouse Station, NJ 08889, USA" +}, { + "location": { + "latitude": 416318082, + "longitude": -749677716 + }, + "name": "78-98 Schalck Road, Narrowsburg, NY 12764, USA" +}, { + "location": { + "latitude": 415301720, + "longitude": -748416257 + }, + "name": "282 Lakeview Drive Road, Highland Lake, NY 12743, USA" +}, { + "location": { + "latitude": 402647019, + "longitude": -747071791 + }, + "name": "330 Evelyn Avenue, Hamilton Township, NJ 08619, USA" +}, { + "location": { + "latitude": 412567807, + "longitude": -741058078 + }, + "name": "New York State Reference Route 987E, Southfields, NY 10975, USA" +}, { + "location": { + "latitude": 416855156, + "longitude": -744420597 + }, + "name": "103-271 Tempaloni Road, Ellenville, NY 12428, USA" +}, { + "location": { + "latitude": 404663628, + "longitude": -744820157 + }, + "name": "1300 Airport Road, North Brunswick Township, NJ 08902, USA" +}, { + "location": { + "latitude": 407113723, + "longitude": -749746483 + }, + "name": "" +}, { + "location": { + "latitude": 402133926, + "longitude": -743613249 + }, + "name": "" +}, { + "location": { + "latitude": 400273442, + "longitude": -741220915 + }, + "name": "" +}, { + "location": { + "latitude": 411236786, + "longitude": -744070769 + }, + "name": "" +}, { + "location": { + "latitude": 411633782, + "longitude": -746784970 + }, + "name": "211-225 Plains Road, Augusta, NJ 07822, USA" +}, { + "location": { + "latitude": 415830701, + "longitude": -742952812 + }, + "name": "" +}, { + "location": { + "latitude": 413447164, + "longitude": -748712898 + }, + "name": "165 Pedersen Ridge Road, Milford, PA 18337, USA" +}, { + "location": { + 
"latitude": 405047245, + "longitude": -749800722 + }, + "name": "100-122 Locktown Road, Frenchtown, NJ 08825, USA" +}, { + "location": { + "latitude": 418858923, + "longitude": -746156790 + }, + "name": "" +}, { + "location": { + "latitude": 417951888, + "longitude": -748484944 + }, + "name": "650-652 Willi Hill Road, Swan Lake, NY 12783, USA" +}, { + "location": { + "latitude": 407033786, + "longitude": -743977337 + }, + "name": "26 East 3rd Street, New Providence, NJ 07974, USA" +}, { + "location": { + "latitude": 417548014, + "longitude": -740075041 + }, + "name": "" +}, { + "location": { + "latitude": 410395868, + "longitude": -744972325 + }, + "name": "" +}, { + "location": { + "latitude": 404615353, + "longitude": -745129803 + }, + "name": "" +}, { + "location": { + "latitude": 406589790, + "longitude": -743560121 + }, + "name": "611 Lawrence Avenue, Westfield, NJ 07090, USA" +}, { + "location": { + "latitude": 414653148, + "longitude": -740477477 + }, + "name": "18 Lannis Avenue, New Windsor, NY 12553, USA" +}, { + "location": { + "latitude": 405957808, + "longitude": -743255336 + }, + "name": "82-104 Amherst Avenue, Colonia, NJ 07067, USA" +}, { + "location": { + "latitude": 411733589, + "longitude": -741648093 + }, + "name": "170 Seven Lakes Drive, Sloatsburg, NY 10974, USA" +}, { + "location": { + "latitude": 412676291, + "longitude": -742606606 + }, + "name": "1270 Lakes Road, Monroe, NY 10950, USA" +}, { + "location": { + "latitude": 409224445, + "longitude": -748286738 + }, + "name": "509-535 Alphano Road, Great Meadows, NJ 07838, USA" +}, { + "location": { + "latitude": 406523420, + "longitude": -742135517 + }, + "name": "652 Garden Street, Elizabeth, NJ 07202, USA" +}, { + "location": { + "latitude": 401827388, + "longitude": -740294537 + }, + "name": "349 Sea Spray Court, Neptune City, NJ 07753, USA" +}, { + "location": { + "latitude": 410564152, + "longitude": -743685054 + }, + "name": "13-17 Stanley Street, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 408472324, + "longitude": -740726046 + }, + "name": "47 Industrial Avenue, Teterboro, NJ 07608, USA" +}, { + "location": { + "latitude": 412452168, + "longitude": -740214052 + }, + "name": "5 White Oak Lane, Stony Point, NY 10980, USA" +}, { + "location": { + "latitude": 409146138, + "longitude": -746188906 + }, + "name": "Berkshire Valley Management Area Trail, Jefferson, NJ, USA" +}, { + "location": { + "latitude": 404701380, + "longitude": -744781745 + }, + "name": "1007 Jersey Avenue, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 409642566, + "longitude": -746017679 + }, + "name": "6 East Emerald Isle Drive, Lake Hopatcong, NJ 07849, USA" +}, { + "location": { + "latitude": 408031728, + "longitude": -748645385 + }, + "name": "1358-1474 New Jersey 57, Port Murray, NJ 07865, USA" +}, { + "location": { + "latitude": 413700272, + "longitude": -742135189 + }, + "name": "367 Prospect Road, Chester, NY 10918, USA" +}, { + "location": { + "latitude": 404310607, + "longitude": -740282632 + }, + "name": "10 Simon Lake Drive, Atlantic Highlands, NJ 07716, USA" +}, { + "location": { + "latitude": 409319800, + "longitude": -746201391 + }, + "name": "11 Ward Street, Mount Arlington, NJ 07856, USA" +}, { + "location": { + "latitude": 406685311, + "longitude": -742108603 + }, + "name": "300-398 Jefferson Avenue, Elizabeth, NJ 07201, USA" +}, { + "location": { + "latitude": 419018117, + "longitude": -749142781 + }, + "name": "43 Dreher Road, Roscoe, NY 12776, USA" +}, { + "location": { + 
"latitude": 412856162, + "longitude": -745148837 + }, + "name": "Swan Street, Pine Island, NY 10969, USA" +}, { + "location": { + "latitude": 416560744, + "longitude": -746721964 + }, + "name": "66 Pleasantview Avenue, Monticello, NY 12701, USA" +}, { + "location": { + "latitude": 405314270, + "longitude": -749836354 + }, + "name": "" +}, { + "location": { + "latitude": 414219548, + "longitude": -743327440 + }, + "name": "" +}, { + "location": { + "latitude": 415534177, + "longitude": -742900616 + }, + "name": "565 Winding Hills Road, Montgomery, NY 12549, USA" +}, { + "location": { + "latitude": 406898530, + "longitude": -749127080 + }, + "name": "231 Rocky Run Road, Glen Gardner, NJ 08826, USA" +}, { + "location": { + "latitude": 407586880, + "longitude": -741670168 + }, + "name": "100 Mount Pleasant Avenue, Newark, NJ 07104, USA" +}, { + "location": { + "latitude": 400106455, + "longitude": -742870190 + }, + "name": "517-521 Huntington Drive, Manchester Township, NJ 08759, USA" +}, { + "location": { + "latitude": 400066188, + "longitude": -746793294 + }, + "name": "" +}, { + "location": { + "latitude": 418803880, + "longitude": -744102673 + }, + "name": "40 Mountain Road, Napanoch, NY 12458, USA" +}, { + "location": { + "latitude": 414204288, + "longitude": -747895140 + }, + "name": "" +}, { + "location": { + "latitude": 414777405, + "longitude": -740615601 + }, + "name": "" +}, { + "location": { + "latitude": 415464475, + "longitude": -747175374 + }, + "name": "48 North Road, Forestburgh, NY 12777, USA" +}, { + "location": { + "latitude": 404062378, + "longitude": -746376177 + }, + "name": "" +}, { + "location": { + "latitude": 405688272, + "longitude": -749285130 + }, + "name": "" +}, { + "location": { + "latitude": 400342070, + "longitude": -748788996 + }, + "name": "" +}, { + "location": { + "latitude": 401809022, + "longitude": -744157964 + }, + "name": "" +}, { + "location": { + "latitude": 404226644, + "longitude": -740517141 + }, + "name": "9 Thompson Avenue, Leonardo, NJ 07737, USA" +}, { + "location": { + "latitude": 410322033, + "longitude": -747871659 + }, + "name": "" +}, { + "location": { + "latitude": 407100674, + "longitude": -747742727 + }, + "name": "" +}, { + "location": { + "latitude": 418811433, + "longitude": -741718005 + }, + "name": "213 Bush Road, Stone Ridge, NY 12484, USA" +}, { + "location": { + "latitude": 415034302, + "longitude": -743850945 + }, + "name": "" +}, { + "location": { + "latitude": 411349992, + "longitude": -743694161 + }, + "name": "" +}, { + "location": { + "latitude": 404839914, + "longitude": -744759616 + }, + "name": "1-17 Bergen Court, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 414638017, + "longitude": -745957854 + }, + "name": "35 Oakland Valley Road, Cuddebackville, NY 12729, USA" +}, { + "location": { + "latitude": 412127800, + "longitude": -740173578 + }, + "name": "" +}, { + "location": { + "latitude": 401263460, + "longitude": -747964303 + }, + "name": "" +}, { + "location": { + "latitude": 412843391, + "longitude": -749086026 + }, + "name": "" +}, { + "location": { + "latitude": 418512773, + "longitude": -743067823 + }, + "name": "" +}, { + "location": { + "latitude": 404318328, + "longitude": -740835638 + }, + "name": "42-102 Main Street, Belford, NJ 07718, USA" +}, { + "location": { + "latitude": 419020746, + "longitude": -741172328 + }, + "name": "" +}, { + "location": { + "latitude": 404080723, + "longitude": -746119569 + }, + "name": "" +}, { + "location": { + "latitude": 401012643, + "longitude": 
-744035134 + }, + "name": "" +}, { + "location": { + "latitude": 404306372, + "longitude": -741079661 + }, + "name": "" +}, { + "location": { + "latitude": 403966326, + "longitude": -748519297 + }, + "name": "" +}, { + "location": { + "latitude": 405002031, + "longitude": -748407866 + }, + "name": "" +}, { + "location": { + "latitude": 409532885, + "longitude": -742200683 + }, + "name": "" +}, { + "location": { + "latitude": 416851321, + "longitude": -742674555 + }, + "name": "" +}, { + "location": { + "latitude": 406411633, + "longitude": -741722051 + }, + "name": "3387 Richmond Terrace, Staten Island, NY 10303, USA" +}, { + "location": { + "latitude": 413069058, + "longitude": -744597778 + }, + "name": "261 Van Sickle Road, Goshen, NY 10924, USA" +}, { + "location": { + "latitude": 418465462, + "longitude": -746859398 + }, + "name": "" +}, { + "location": { + "latitude": 411733222, + "longitude": -744228360 + }, + "name": "" +}, { + "location": { + "latitude": 410248224, + "longitude": -747127767 + }, + "name": "3 Hasta Way, Newton, NJ 07860, USA" +}] diff --git a/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.key b/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.key new file mode 100644 index 00000000000..143a5b87658 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD +M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf +3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY +AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm +V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY +tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p +dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q +K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR +81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff +DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd +aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 +ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 +XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe +F98XJ7tIFfJq +-----END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.pem b/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.pem new file mode 100644 index 00000000000..f3d43fcc5be --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx +MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 +ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco +LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg +zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd +9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw +CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy +em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G +CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 +hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh 
+y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/grpclog/glogger/glogger.go b/vendor/google.golang.org/grpc/grpclog/glogger/glogger.go new file mode 100644 index 00000000000..53e3c539fb0 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/glogger/glogger.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +Package glogger defines glog-based logging for grpc. +*/ +package glogger + +import ( + "github.com/golang/glog" + "google.golang.org/grpc/grpclog" +) + +func init() { + grpclog.SetLogger(&glogger{}) +} + +type glogger struct{} + +func (g *glogger) Fatal(args ...interface{}) { + glog.Fatal(args...) +} + +func (g *glogger) Fatalf(format string, args ...interface{}) { + glog.Fatalf(format, args...) +} + +func (g *glogger) Fatalln(args ...interface{}) { + glog.Fatalln(args...) +} + +func (g *glogger) Print(args ...interface{}) { + glog.Info(args...) +} + +func (g *glogger) Printf(format string, args ...interface{}) { + glog.Infof(format, args...) +} + +func (g *glogger) Println(args ...interface{}) { + glog.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index ec089f70f81..3b2933079e9 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -42,6 +42,8 @@ import ( ) // Use golang's standard logger by default. +// Access is not mutex-protected: do not modify except in init() +// functions. var logger Logger = log.New(os.Stderr, "", log.LstdFlags) // Logger mimics golang's standard Logger as an interface. @@ -54,7 +56,8 @@ type Logger interface { Println(args ...interface{}) } -// SetLogger sets the logger that is used in grpc. +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. 
func SetLogger(l Logger) { logger = l } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000000..0e6a9100af2 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,176 @@ +// Code generated by protoc-gen-go. +// source: health.proto +// DO NOT EDIT! + +/* +Package grpc_health_v1 is a generated protocol buffer package. + +It is generated from these files: + health.proto + +It has these top-level messages: + HealthCheckRequest + HealthCheckResponse +*/ +package grpc_health_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("health.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 204 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc, + 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x83, + 0x0a, 0x95, 0x19, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x39, 0xce, 0x19, 0xa9, 0xc9, 0xd9, 0x41, + 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, + 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0xd2, 0x1c, 0x46, 0x2e, 0x61, 0x14, + 0x0d, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x9e, 0x5c, 0x6c, 0xc5, 0x25, 0x89, 0x25, 0xa5, + 0xc5, 0x60, 0x0d, 0x7c, 0x46, 0x86, 0x7a, 0xa8, 0x16, 0xe9, 0x61, 0xd1, 0xa4, 0x17, 0x0c, 0x32, + 0x34, 0x2f, 0x3d, 0x18, 0xac, 0x31, 0x08, 0x6a, 0x80, 0x92, 0x15, 0x17, 0x2f, 0x8a, 0x84, 0x10, + 0x37, 0x17, 0x7b, 0xa8, 0x9f, 0xb7, 0x9f, 0x7f, 0xb8, 0x9f, 0x00, 0x03, 0x88, 0x13, 0xec, 0x1a, + 0x14, 0xe6, 0xe9, 0xe7, 0x2e, 0xc0, 0x28, 0xc4, 0xcf, 0xc5, 0xed, 0xe7, 0x1f, 0x12, 0x0f, 0x13, + 0x60, 0x32, 0x8a, 0xe2, 0x62, 0x83, 0x58, 0x24, 0x14, 0xc0, 0xc5, 0x0a, 0xb6, 0x4c, 0x48, 0x09, + 0xaf, 0x4b, 0xc0, 0xfe, 0x95, 0x52, 0x26, 0xc2, 0xb5, 0x49, 0x6c, 0xe0, 0x10, 0x34, 0x06, 0x04, + 0x00, 0x00, 0xff, 0xff, 0xac, 0x56, 0x2a, 0xcb, 0x51, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto 
b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto new file mode 100644 index 00000000000..e2dc0889258 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package grpc.health.v1; + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} + +service Health{ + rpc Check(HealthCheckRequest) returns (HealthCheckResponse); +} diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go new file mode 100644 index 00000000000..f74fd69bc39 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/health.go @@ -0,0 +1,49 @@ +// Package health provides some utility functions to health-check a server. The implementation +// is based on protobuf. Users need to write their own implementations if other IDLs are used. +package health + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" +) + +type HealthServer struct { + mu sync.Mutex + // statusMap stores the serving status of the services this HealthServer monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus +} + +func NewHealthServer() *HealthServer { + return &HealthServer{ + statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus), + } +} + +func (s *HealthServer) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if in.Service == "" { + // check the server overall health status. + return &healthpb.HealthCheckResponse{ + Status: healthpb.HealthCheckResponse_SERVING, + }, nil + } + if status, ok := s.statusMap[in.Service]; ok { + return &healthpb.HealthCheckResponse{ + Status: status, + }, nil + } + return nil, grpc.Errorf(codes.NotFound, "unknown service") +} + +// SetServingStatus is called when need to reset the serving status of a service +// or insert a new service entry into the statusMap. +func (s *HealthServer) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + s.statusMap[service] = status + s.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 00000000000..588f59e5abf --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" +) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 00000000000..5489143a85c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,49 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package internal contains gRPC-internal code for testing, to avoid polluting +// the godoc of the top-level grpc package. +package internal + +// TestingCloseConns closes all existing transports but keeps +// grpcServer.lis accepting new connections. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingCloseConns func(grpcServer interface{}) + +// TestingUseHandlerImpl enables the http.Handler-based server implementation. +// It must be called before Serve and requires TLS credentials. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/vendor/google.golang.org/grpc/interop/client/client.go b/vendor/google.golang.org/grpc/interop/client/client.go new file mode 100644 index 00000000000..98f6cfec3ae --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/client/client.go @@ -0,0 +1,186 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "flag" + "net" + "strconv" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/interop" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +var ( + useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") + testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root") + serviceAccountKeyFile = flag.String("service_account_key_file", "", "Path to service account json key file") + oauthScope = flag.String("oauth_scope", "", "The scope for OAuth2 tokens") + defaultServiceAccount = flag.String("default_service_account", "", "Email of GCE default service account") + serverHost = flag.String("server_host", "127.0.0.1", "The server host name") + serverPort = flag.Int("server_port", 10000, "The server port number") + tlsServerName = flag.String("server_host_override", "x.test.youtube.com", "The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") + testCase = flag.String("test_case", "large_unary", + `Configure different test cases. 
Valid options are: + empty_unary : empty (zero bytes) request and response; + large_unary : single request and (large) response; + client_streaming : request streaming with single response; + server_streaming : single request with response streaming; + ping_pong : full-duplex streaming; + empty_stream : full-duplex streaming with zero message; + timeout_on_sleeping_server: fullduplex streaming on a sleeping server; + compute_engine_creds: large_unary with compute engine auth; + service_account_creds: large_unary with service account auth; + jwt_token_creds: large_unary with jwt token auth; + per_rpc_creds: large_unary with per rpc token; + oauth2_auth_token: large_unary with oauth2 token auth; + cancel_after_begin: cancellation after metadata has been sent but before payloads are sent; + cancel_after_first_response: cancellation after receiving 1st message from the server.`) + + // The test CA root cert file + testCAFile = "testdata/ca.pem" +) + +func main() { + flag.Parse() + serverAddr := net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) + var opts []grpc.DialOption + if *useTLS { + var sn string + if *tlsServerName != "" { + sn = *tlsServerName + } + var creds credentials.TransportCredentials + if *testCA { + var err error + creds, err = credentials.NewClientTLSFromFile(testCAFile, sn) + if err != nil { + grpclog.Fatalf("Failed to create TLS credentials %v", err) + } + } else { + creds = credentials.NewClientTLSFromCert(nil, sn) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + if *testCase == "compute_engine_creds" { + opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewComputeEngine())) + } else if *testCase == "service_account_creds" { + jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) + if err != nil { + grpclog.Fatalf("Failed to create JWT credentials: %v", err) + } + opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) + } else if *testCase == "jwt_token_creds" { + jwtCreds, err := oauth.NewJWTAccessFromFile(*serviceAccountKeyFile) + if err != nil { + grpclog.Fatalf("Failed to create JWT credentials: %v", err) + } + opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) + } else if *testCase == "oauth2_auth_token" { + opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewOauthAccess(interop.GetToken(*serviceAccountKeyFile, *oauthScope)))) + } + } else { + opts = append(opts, grpc.WithInsecure()) + } + conn, err := grpc.Dial(serverAddr, opts...) + if err != nil { + grpclog.Fatalf("Fail to dial: %v", err) + } + defer conn.Close() + tc := testpb.NewTestServiceClient(conn) + switch *testCase { + case "empty_unary": + interop.DoEmptyUnaryCall(tc) + grpclog.Println("EmptyUnaryCall done") + case "large_unary": + interop.DoLargeUnaryCall(tc) + grpclog.Println("LargeUnaryCall done") + case "client_streaming": + interop.DoClientStreaming(tc) + grpclog.Println("ClientStreaming done") + case "server_streaming": + interop.DoServerStreaming(tc) + grpclog.Println("ServerStreaming done") + case "ping_pong": + interop.DoPingPong(tc) + grpclog.Println("Pingpong done") + case "empty_stream": + interop.DoEmptyStream(tc) + grpclog.Println("Emptystream done") + case "timeout_on_sleeping_server": + interop.DoTimeoutOnSleepingServer(tc) + grpclog.Println("TimeoutOnSleepingServer done") + case "compute_engine_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. 
TLS is required to execute compute_engine_creds test case.") + } + interop.DoComputeEngineCreds(tc, *defaultServiceAccount, *oauthScope) + grpclog.Println("ComputeEngineCreds done") + case "service_account_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute service_account_creds test case.") + } + interop.DoServiceAccountCreds(tc, *serviceAccountKeyFile, *oauthScope) + grpclog.Println("ServiceAccountCreds done") + case "jwt_token_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute jwt_token_creds test case.") + } + interop.DoJWTTokenCreds(tc, *serviceAccountKeyFile) + grpclog.Println("JWTtokenCreds done") + case "per_rpc_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute per_rpc_creds test case.") + } + interop.DoPerRPCCreds(tc, *serviceAccountKeyFile, *oauthScope) + grpclog.Println("PerRPCCreds done") + case "oauth2_auth_token": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute oauth2_auth_token test case.") + } + interop.DoOauth2TokenCreds(tc, *serviceAccountKeyFile, *oauthScope) + grpclog.Println("Oauth2TokenCreds done") + case "cancel_after_begin": + interop.DoCancelAfterBegin(tc) + grpclog.Println("CancelAfterBegin done") + case "cancel_after_first_response": + interop.DoCancelAfterFirstResponse(tc) + grpclog.Println("CancelAfterFirstResponse done") + default: + grpclog.Fatal("Unsupported test case: ", *testCase) + } +} diff --git a/vendor/google.golang.org/grpc/interop/client/testdata/ca.pem b/vendor/google.golang.org/grpc/interop/client/testdata/ca.pem new file mode 100644 index 00000000000..6c8511a73c6 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/client/testdata/ca.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla +Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 +YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT +BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 ++L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu +g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd +Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau +sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m +oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG +Dfcog5wrJytaQ6UA0wE= +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/interop/client/testdata/server1.key b/vendor/google.golang.org/grpc/interop/client/testdata/server1.key new file mode 100644 index 00000000000..143a5b87658 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/client/testdata/server1.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD +M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf +3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY +AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm +V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY +tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p +dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q +K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR 
+81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff +DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd +aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 +ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 +XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe +F98XJ7tIFfJq +-----END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/interop/client/testdata/server1.pem b/vendor/google.golang.org/grpc/interop/client/testdata/server1.pem new file mode 100644 index 00000000000..f3d43fcc5be --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/client/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx +MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 +ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco +LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg +zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd +9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw +CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy +em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G +CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 +hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh +y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go new file mode 100755 index 00000000000..0ceb12df561 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go @@ -0,0 +1,788 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + Empty + Payload + SimpleRequest + SimpleResponse + StreamingInputCallRequest + StreamingInputCallResponse + ResponseParameters + StreamingOutputCallRequest + StreamingOutputCallResponse +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The type of payload that should be returned. +type PayloadType int32 + +const ( + // Compressable text format. + PayloadType_COMPRESSABLE PayloadType = 0 + // Uncompressable binary format. + PayloadType_UNCOMPRESSABLE PayloadType = 1 + // Randomly chosen from all other formats defined in this enum. 
+ PayloadType_RANDOM PayloadType = 2 +) + +var PayloadType_name = map[int32]string{ + 0: "COMPRESSABLE", + 1: "UNCOMPRESSABLE", + 2: "RANDOM", +} +var PayloadType_value = map[string]int32{ + "COMPRESSABLE": 0, + "UNCOMPRESSABLE": 1, + "RANDOM": 2, +} + +func (x PayloadType) Enum() *PayloadType { + p := new(PayloadType) + *p = x + return p +} +func (x PayloadType) String() string { + return proto.EnumName(PayloadType_name, int32(x)) +} +func (x *PayloadType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType") + if err != nil { + return err + } + *x = PayloadType(value) + return nil +} +func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// A block of data, to simply increase gRPC message size. +type Payload struct { + // The type of data in body. + Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + // Primary contents of payload. + Body []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Payload) Reset() { *m = Payload{} } +func (m *Payload) String() string { return proto.CompactTextString(m) } +func (*Payload) ProtoMessage() {} +func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Payload) GetType() PayloadType { + if m != nil && m.Type != nil { + return *m.Type + } + return PayloadType_COMPRESSABLE +} + +func (m *Payload) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +// Unary request. +type SimpleRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Whether SimpleResponse should include username. + FillUsername *bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"` + // Whether SimpleResponse should include OAuth scope. 
+ FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} +func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *SimpleRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *SimpleRequest) GetResponseSize() int32 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *SimpleRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleRequest) GetFillUsername() bool { + if m != nil && m.FillUsername != nil { + return *m.FillUsername + } + return false +} + +func (m *SimpleRequest) GetFillOauthScope() bool { + if m != nil && m.FillOauthScope != nil { + return *m.FillOauthScope + } + return false +} + +// Unary response, as configured by the request. +type SimpleResponse struct { + // Payload to increase message size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + // The user the request came from, for verifying authentication was + // successful when the client expected it. + Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` + // OAuth scope. + OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} +func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *SimpleResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleResponse) GetUsername() string { + if m != nil && m.Username != nil { + return *m.Username + } + return "" +} + +func (m *SimpleResponse) GetOauthScope() string { + if m != nil && m.OauthScope != nil { + return *m.OauthScope + } + return "" +} + +// Client-streaming request. +type StreamingInputCallRequest struct { + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } +func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallRequest) ProtoMessage() {} +func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *StreamingInputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Client-streaming response. +type StreamingInputCallResponse struct { + // Aggregated size of payloads received from the client. 
+ AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } +func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallResponse) ProtoMessage() {} +func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { + if m != nil && m.AggregatedPayloadSize != nil { + return *m.AggregatedPayloadSize + } + return 0 +} + +// Configuration for a particular response. +type ResponseParameters struct { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` + // Desired interval between consecutive responses in the response stream in + // microseconds. + IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } +func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } +func (*ResponseParameters) ProtoMessage() {} +func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ResponseParameters) GetSize() int32 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *ResponseParameters) GetIntervalUs() int32 { + if m != nil && m.IntervalUs != nil { + return *m.IntervalUs + } + return 0 +} + +// Server-streaming request. +type StreamingOutputCallRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Configuration for each expected response message. + ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } +func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallRequest) ProtoMessage() {} +func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { + if m != nil { + return m.ResponseParameters + } + return nil +} + +func (m *StreamingOutputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Server-streaming response, as configured by the request and parameters. 
+type StreamingOutputCallResponse struct { + // Payload to increase response size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } +func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallResponse) ProtoMessage() {} +func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *StreamingOutputCallResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func init() { + proto.RegisterType((*Empty)(nil), "grpc.testing.Empty") + proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") + proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") + proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") + proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") + proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") + proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") + proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") + proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") + proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for TestService service + +type TestServiceClient interface { + // One empty request followed by one empty response. + EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) +} + +type testServiceClient struct { + cc *grpc.ClientConn +} + +func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { + return &testServiceClient{cc} +} + +func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceStreamingOutputCallClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TestService_StreamingOutputCallClient interface { + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingOutputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceStreamingInputCallClient{stream} + return x, nil +} + +type TestService_StreamingInputCallClient interface { + Send(*StreamingInputCallRequest) error + CloseAndRecv() (*StreamingInputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingInputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(StreamingInputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) 
+ if err != nil { + return nil, err + } + x := &testServiceFullDuplexCallClient{stream} + return x, nil +} + +type TestService_FullDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceFullDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceHalfDuplexCallClient{stream} + return x, nil +} + +type TestService_HalfDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceHalfDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for TestService service + +type TestServiceServer interface { + // One empty request followed by one empty response. + EmptyCall(context.Context, *Empty) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + StreamingInputCall(TestService_StreamingInputCallServer) error + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(TestService_FullDuplexCallServer) error + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ HalfDuplexCall(TestService_HalfDuplexCallServer) error +} + +func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { + s.RegisterService(&_TestService_serviceDesc, srv) +} + +func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServiceServer).EmptyCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.TestService/EmptyCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServiceServer).UnaryCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.TestService/UnaryCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamingOutputCallRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) +} + +type TestService_StreamingOutputCallServer interface { + Send(*StreamingOutputCallResponse) error + grpc.ServerStream +} + +type testServiceStreamingOutputCallServer struct { + grpc.ServerStream +} + +func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) +} + +type TestService_StreamingInputCallServer interface { + SendAndClose(*StreamingInputCallResponse) error + Recv() (*StreamingInputCallRequest, error) + grpc.ServerStream +} + +type testServiceStreamingInputCallServer struct { + grpc.ServerStream +} + +func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { + m := new(StreamingInputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) +} + +type TestService_FullDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceFullDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { 
+ m := new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) +} + +type TestService_HalfDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceHalfDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { + m := new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TestService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.TestService", + HandlerType: (*TestServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "EmptyCall", + Handler: _TestService_EmptyCall_Handler, + }, + { + MethodName: "UnaryCall", + Handler: _TestService_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingOutputCall", + Handler: _TestService_StreamingOutputCall_Handler, + ServerStreams: true, + }, + { + StreamName: "StreamingInputCall", + Handler: _TestService_StreamingInputCall_Handler, + ClientStreams: true, + }, + { + StreamName: "FullDuplexCall", + Handler: _TestService_FullDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "HalfDuplexCall", + Handler: _TestService_HalfDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("test.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 567 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x54, 0x51, 0x6f, 0xd2, 0x50, + 0x14, 0xb6, 0x03, 0x64, 0x1c, 0x58, 0x43, 0x0e, 0x59, 0x64, 0x9d, 0x89, 0x4b, 0x7d, 0xb0, 0x9a, + 0x88, 0x86, 0x44, 0x1f, 0x35, 0x73, 0x63, 0x71, 0x09, 0x03, 0x6c, 0xe1, 0x99, 0x5c, 0xe1, 0x0e, + 0x9b, 0x94, 0xb6, 0xb6, 0xb7, 0x46, 0x7c, 0xf0, 0x8f, 0xf9, 0x67, 0xfc, 0x11, 0xfe, 0x00, 0xef, + 0xbd, 0x6d, 0xa1, 0x40, 0x17, 0x99, 0xc6, 0xbd, 0xb5, 0xdf, 0xf9, 0xce, 0x77, 0xbe, 0xef, 0x9e, + 0xdb, 0x02, 0x30, 0x1a, 0xb2, 0x96, 0x1f, 0x78, 0xcc, 0xc3, 0xda, 0x2c, 0xf0, 0x27, 0x2d, 0x01, + 0xd8, 0xee, 0x4c, 0x2f, 0x43, 0xa9, 0x33, 0xf7, 0xd9, 0x42, 0xef, 0x42, 0x79, 0x40, 0x16, 0x8e, + 0x47, 0xa6, 0xf8, 0x1c, 0x8a, 0x6c, 0xe1, 0xd3, 0xa6, 0x72, 0xa2, 0x18, 0x6a, 0xfb, 0xa8, 0x95, + 0x6d, 0x68, 0x25, 0xa4, 0x21, 0x27, 0x98, 0x92, 0x86, 0x08, 0xc5, 0x8f, 0xde, 0x74, 0xd1, 0xdc, + 0xe3, 0xf4, 0x9a, 0x29, 0x9f, 0xf5, 0x5f, 0x0a, 0x1c, 0x58, 0xf6, 0xdc, 0x77, 0xa8, 0x49, 0x3f, + 0x47, 0xbc, 0x15, 0xdf, 0xc0, 0x41, 0x40, 0x43, 0xdf, 0x73, 0x43, 0x3a, 0xde, 0x4d, 0xbd, 0x96, + 0xf2, 0xc5, 0x1b, 0x3e, 0xce, 0xf4, 0x87, 0xf6, 0x37, 0x2a, 0xc7, 0x95, 0x56, 0x24, 0x8b, 0x63, + 0xf8, 0x02, 0xca, 0x7e, 0xac, 0xd0, 0x2c, 0xf0, 0x72, 0xb5, 0x7d, 0x98, 0x2b, 0x6f, 0xa6, 0x2c, + 0xa1, 0x7a, 0x6d, 0x3b, 0xce, 0x38, 0x0a, 0x69, 0xe0, 0x92, 0x39, 0x6d, 0x16, 0x79, 0xdb, 0xbe, + 0x59, 0x13, 0xe0, 0x28, 0xc1, 0xd0, 0x80, 0xba, 0x24, 0x79, 0x24, 0x62, 0x9f, 0xc6, 0xe1, 0xc4, + 0xe3, 0xee, 0x4b, 0x92, 0xa7, 0x0a, 0xbc, 0x2f, 0x60, 0x4b, 0xa0, 0xfa, 0x77, 0x50, 0xd3, 0xd4, + 0xb1, 0xab, 0xac, 
0x23, 0x65, 0x27, 0x47, 0x1a, 0xec, 0x2f, 0xcd, 0x88, 0x88, 0x15, 0x73, 0xf9, + 0x8e, 0x8f, 0xa0, 0x9a, 0xf5, 0x50, 0x90, 0x65, 0xf0, 0x56, 0xf3, 0xbb, 0x70, 0x64, 0xb1, 0x80, + 0x92, 0x39, 0x97, 0xbe, 0x74, 0xfd, 0x88, 0x9d, 0x11, 0xc7, 0x49, 0x37, 0x70, 0x5b, 0x2b, 0xfa, + 0x10, 0xb4, 0x3c, 0xb5, 0x24, 0xd9, 0x6b, 0x78, 0x40, 0x66, 0xb3, 0x80, 0xce, 0x08, 0xa3, 0xd3, + 0x71, 0xd2, 0x13, 0xaf, 0x46, 0x91, 0xab, 0x39, 0x5c, 0x95, 0x13, 0x69, 0xb1, 0x23, 0xfd, 0x12, + 0x30, 0xd5, 0x18, 0x90, 0x80, 0xc7, 0x62, 0x34, 0x08, 0xc5, 0x25, 0xca, 0xb4, 0xca, 0x67, 0x11, + 0xd7, 0x76, 0x79, 0xf5, 0x0b, 0x11, 0x0b, 0x4a, 0x16, 0x0e, 0x29, 0x34, 0x0a, 0xf5, 0x9f, 0x4a, + 0xc6, 0x61, 0x3f, 0x62, 0x1b, 0x81, 0xff, 0xf5, 0xca, 0x7d, 0x80, 0xc6, 0xb2, 0xdf, 0x5f, 0x5a, + 0xe5, 0x3e, 0x0a, 0xfc, 0xf0, 0x4e, 0xd6, 0x55, 0xb6, 0x23, 0x99, 0x18, 0x6c, 0xc7, 0xbc, 0xed, + 0x05, 0xd5, 0x7b, 0x70, 0x9c, 0x9b, 0xf0, 0x2f, 0xaf, 0xd7, 0xb3, 0xb7, 0x50, 0xcd, 0x04, 0xc6, + 0x3a, 0xd4, 0xce, 0xfa, 0x57, 0x03, 0xb3, 0x63, 0x59, 0xa7, 0xef, 0xba, 0x9d, 0xfa, 0x3d, 0xbe, + 0x08, 0x75, 0xd4, 0x5b, 0xc3, 0x14, 0x04, 0xb8, 0x6f, 0x9e, 0xf6, 0xce, 0xfb, 0x57, 0xf5, 0xbd, + 0xf6, 0x8f, 0x22, 0x54, 0x87, 0x5c, 0xdd, 0xe2, 0x4b, 0xb0, 0x27, 0x14, 0x5f, 0x41, 0x45, 0xfe, + 0x40, 0x84, 0x2d, 0x6c, 0xac, 0x4f, 0x97, 0x05, 0x2d, 0x0f, 0xc4, 0x0b, 0xa8, 0x8c, 0x5c, 0x12, + 0xc4, 0x6d, 0xc7, 0xeb, 0x8c, 0xb5, 0x1f, 0x87, 0xf6, 0x30, 0xbf, 0x98, 0x1c, 0x80, 0x03, 0x8d, + 0x9c, 0xf3, 0x41, 0x63, 0xa3, 0xe9, 0xc6, 0x4b, 0xa2, 0x3d, 0xdd, 0x81, 0x19, 0xcf, 0x7a, 0xa9, + 0xa0, 0x0d, 0xb8, 0xfd, 0x45, 0xe0, 0x93, 0x1b, 0x24, 0x36, 0xbf, 0x40, 0xcd, 0xf8, 0x33, 0x31, + 0x1e, 0x65, 0x88, 0x51, 0xea, 0x45, 0xe4, 0x38, 0xe7, 0x11, 0x4f, 0xfb, 0xf5, 0xbf, 0x65, 0x32, + 0x14, 0x99, 0x4a, 0x7d, 0x4f, 0x9c, 0xeb, 0x3b, 0x18, 0xf5, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x4c, + 0x41, 0xfe, 0xb6, 0x89, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto b/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto new file mode 100644 index 00000000000..b5bfe053789 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto @@ -0,0 +1,140 @@ +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. +syntax = "proto2"; + +package grpc.testing; + +message Empty {} + +// The type of payload that should be returned. +enum PayloadType { + // Compressable text format. + COMPRESSABLE = 0; + + // Uncompressable binary format. + UNCOMPRESSABLE = 1; + + // Randomly chosen from all other formats defined in this enum. + RANDOM = 2; +} + +// A block of data, to simply increase gRPC message size. +message Payload { + // The type of data in body. + optional PayloadType type = 1; + // Primary contents of payload. + optional bytes body = 2; +} + +// Unary request. +message SimpleRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + optional PayloadType response_type = 1; + + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + optional int32 response_size = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; + + // Whether SimpleResponse should include username. + optional bool fill_username = 4; + + // Whether SimpleResponse should include OAuth scope. 
+ optional bool fill_oauth_scope = 5; +} + +// Unary response, as configured by the request. +message SimpleResponse { + // Payload to increase message size. + optional Payload payload = 1; + + // The user the request came from, for verifying authentication was + // successful when the client expected it. + optional string username = 2; + + // OAuth scope. + optional string oauth_scope = 3; +} + +// Client-streaming request. +message StreamingInputCallRequest { + // Optional input payload sent along with the request. + optional Payload payload = 1; + + // Not expecting any payload from the response. +} + +// Client-streaming response. +message StreamingInputCallResponse { + // Aggregated size of payloads received from the client. + optional int32 aggregated_payload_size = 1; +} + +// Configuration for a particular response. +message ResponseParameters { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + optional int32 size = 1; + + // Desired interval between consecutive responses in the response stream in + // microseconds. + optional int32 interval_us = 2; +} + +// Server-streaming request. +message StreamingOutputCallRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + optional PayloadType response_type = 1; + + // Configuration for each expected response message. + repeated ResponseParameters response_parameters = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; +} + +// Server-streaming response, as configured by the request and parameters. +message StreamingOutputCallResponse { + // Payload to increase response size. + optional Payload payload = 1; +} + +// A simple service to test the various types of RPCs and experiment with +// performance with various types of payload. +service TestService { + // One empty request followed by one empty response. + rpc EmptyCall(Empty) returns (Empty); + + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + rpc StreamingOutputCall(StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + rpc StreamingInputCall(stream StreamingInputCallRequest) + returns (StreamingInputCallResponse); + + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + rpc FullDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ rpc HalfDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); +} diff --git a/vendor/google.golang.org/grpc/interop/server/server.go b/vendor/google.golang.org/grpc/interop/server/server.go new file mode 100644 index 00000000000..36ebcb6418e --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/server/server.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "flag" + "net" + "strconv" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/interop" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +var ( + useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") + certFile = flag.String("tls_cert_file", "testdata/server1.pem", "The TLS cert file") + keyFile = flag.String("tls_key_file", "testdata/server1.key", "The TLS key file") + port = flag.Int("port", 10000, "The server port") +) + +func main() { + flag.Parse() + p := strconv.Itoa(*port) + lis, err := net.Listen("tcp", ":"+p) + if err != nil { + grpclog.Fatalf("failed to listen: %v", err) + } + var opts []grpc.ServerOption + if *useTLS { + creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) + if err != nil { + grpclog.Fatalf("Failed to generate credentials %v", err) + } + opts = []grpc.ServerOption{grpc.Creds(creds)} + } + server := grpc.NewServer(opts...) 
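+ // Register the generated TestService implementation with the server and start serving on the listener.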
+ testpb.RegisterTestServiceServer(server, interop.NewTestServer()) + server.Serve(lis) +} diff --git a/vendor/google.golang.org/grpc/interop/server/testdata/ca.pem b/vendor/google.golang.org/grpc/interop/server/testdata/ca.pem new file mode 100644 index 00000000000..6c8511a73c6 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/server/testdata/ca.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla +Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 +YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT +BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 ++L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu +g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd +Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau +sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m +oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG +Dfcog5wrJytaQ6UA0wE= +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/interop/server/testdata/server1.key b/vendor/google.golang.org/grpc/interop/server/testdata/server1.key new file mode 100644 index 00000000000..143a5b87658 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/server/testdata/server1.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD +M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf +3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY +AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm +V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY +tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p +dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q +K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR +81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff +DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd +aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 +ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 +XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe +F98XJ7tIFfJq +-----END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/interop/server/testdata/server1.pem b/vendor/google.golang.org/grpc/interop/server/testdata/server1.pem new file mode 100644 index 00000000000..f3d43fcc5be --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/server/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx +MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 +ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco +LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg +zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd +9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw +CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy +em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G 
+CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 +hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh +y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/interop/test_utils.go b/vendor/google.golang.org/grpc/interop/test_utils.go new file mode 100644 index 00000000000..23340ab8146 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/test_utils.go @@ -0,0 +1,592 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package interop + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/metadata" +) + +var ( + reqSizes = []int{27182, 8, 1828, 45904} + respSizes = []int{31415, 9, 2653, 58979} + largeReqSize = 271828 + largeRespSize = 314159 +) + +func clientNewPayload(t testpb.PayloadType, size int) *testpb.Payload { + if size < 0 { + grpclog.Fatalf("Requested a response with invalid length %d", size) + } + body := make([]byte, size) + switch t { + case testpb.PayloadType_COMPRESSABLE: + case testpb.PayloadType_UNCOMPRESSABLE: + grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") + default: + grpclog.Fatalf("Unsupported payload type: %d", t) + } + return &testpb.Payload{ + Type: t.Enum(), + Body: body, + } +} + +// DoEmptyUnaryCall performs a unary RPC with empty request and response messages. 
+func DoEmptyUnaryCall(tc testpb.TestServiceClient) {
+ reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{})
+ if err != nil {
+ grpclog.Fatal("/TestService/EmptyCall RPC failed: ", err)
+ }
+ if !proto.Equal(&testpb.Empty{}, reply) {
+ grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{})
+ }
+}
+
+// DoLargeUnaryCall performs a unary RPC with large payload in the request and response.
+func DoLargeUnaryCall(tc testpb.TestServiceClient) {
+ pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
+ req := &testpb.SimpleRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseSize: proto.Int32(int32(largeRespSize)),
+ Payload: pl,
+ }
+ reply, err := tc.UnaryCall(context.Background(), req)
+ if err != nil {
+ grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
+ }
+ t := reply.GetPayload().GetType()
+ s := len(reply.GetPayload().GetBody())
+ if t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize {
+ grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize)
+ }
+}
+
+// DoClientStreaming performs a client streaming RPC.
+func DoClientStreaming(tc testpb.TestServiceClient) {
+ stream, err := tc.StreamingInputCall(context.Background())
+ if err != nil {
+ grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
+ }
+ var sum int
+ for _, s := range reqSizes {
+ pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, s)
+ req := &testpb.StreamingInputCallRequest{
+ Payload: pl,
+ }
+ if err := stream.Send(req); err != nil {
+ grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
+ }
+ sum += s
+ grpclog.Printf("Sent a request of size %d, aggregated size %d", s, sum)
+
+ }
+ reply, err := stream.CloseAndRecv()
+ if err != nil {
+ grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
+ }
+ if reply.GetAggregatedPayloadSize() != int32(sum) {
+ grpclog.Fatalf("%v.CloseAndRecv().GetAggregatedPayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
+ }
+}
+
+// DoServerStreaming performs a server streaming RPC.
+func DoServerStreaming(tc testpb.TestServiceClient) {
+ respParam := make([]*testpb.ResponseParameters, len(respSizes))
+ for i, s := range respSizes {
+ respParam[i] = &testpb.ResponseParameters{
+ Size: proto.Int32(int32(s)),
+ }
+ }
+ req := &testpb.StreamingOutputCallRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseParameters: respParam,
+ }
+ stream, err := tc.StreamingOutputCall(context.Background(), req)
+ if err != nil {
+ grpclog.Fatalf("%v.StreamingOutputCall(_) = _, %v", tc, err)
+ }
+ var rpcStatus error
+ var respCnt int
+ var index int
+ for {
+ reply, err := stream.Recv()
+ if err != nil {
+ rpcStatus = err
+ break
+ }
+ t := reply.GetPayload().GetType()
+ if t != testpb.PayloadType_COMPRESSABLE {
+ grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE)
+ }
+ size := len(reply.GetPayload().GetBody())
+ if size != int(respSizes[index]) {
+ grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
+ }
+ index++
+ respCnt++
+ }
+ if rpcStatus != io.EOF {
+ grpclog.Fatalf("Failed to finish the server streaming rpc: %v", rpcStatus)
+ }
+ if respCnt != len(respSizes) {
+ grpclog.Fatalf("Got %d replies, want %d", respCnt, len(respSizes))
+ }
+}
+
+// DoPingPong performs ping-pong style bi-directional streaming RPC.
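+// It sends one request at a time and waits for the matching response before sending the next.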
+func DoPingPong(tc testpb.TestServiceClient) {
+ stream, err := tc.FullDuplexCall(context.Background())
+ if err != nil {
+ grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
+ }
+ var index int
+ for index < len(reqSizes) {
+ respParam := []*testpb.ResponseParameters{
+ {
+ Size: proto.Int32(int32(respSizes[index])),
+ },
+ }
+ pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, reqSizes[index])
+ req := &testpb.StreamingOutputCallRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseParameters: respParam,
+ Payload: pl,
+ }
+ if err := stream.Send(req); err != nil {
+ grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
+ }
+ reply, err := stream.Recv()
+ if err != nil {
+ grpclog.Fatalf("%v.Recv() = %v", stream, err)
+ }
+ t := reply.GetPayload().GetType()
+ if t != testpb.PayloadType_COMPRESSABLE {
+ grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE)
+ }
+ size := len(reply.GetPayload().GetBody())
+ if size != int(respSizes[index]) {
+ grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
+ }
+ index++
+ }
+ if err := stream.CloseSend(); err != nil {
+ grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
+ }
+ if _, err := stream.Recv(); err != io.EOF {
+ grpclog.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
+ }
+}
+
+// DoEmptyStream sets up a bi-directional streaming with zero message.
+func DoEmptyStream(tc testpb.TestServiceClient) {
+ stream, err := tc.FullDuplexCall(context.Background())
+ if err != nil {
+ grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
+ }
+ if err := stream.CloseSend(); err != nil {
+ grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
+ }
+ if _, err := stream.Recv(); err != io.EOF {
+ grpclog.Fatalf("%v failed to complete the empty stream test: %v", stream, err)
+ }
+}
+
+// DoTimeoutOnSleepingServer performs an RPC on a sleep server which causes RPC timeout.
+func DoTimeoutOnSleepingServer(tc testpb.TestServiceClient) {
+ ctx, _ := context.WithTimeout(context.Background(), 1*time.Millisecond)
+ stream, err := tc.FullDuplexCall(ctx)
+ if err != nil {
+ if grpc.Code(err) == codes.DeadlineExceeded {
+ return
+ }
+ grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
+ }
+ pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182)
+ req := &testpb.StreamingOutputCallRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ Payload: pl,
+ }
+ if err := stream.Send(req); err != nil {
+ grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
+ }
+ if _, err := stream.Recv(); grpc.Code(err) != codes.DeadlineExceeded {
+ grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded)
+ }
+}
+
+// DoComputeEngineCreds performs a unary RPC with compute engine auth.
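+// It checks that the returned username equals the given service account and that the returned
+// OAuth scope is a substring of oauthScope.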
+func DoComputeEngineCreds(tc testpb.TestServiceClient, serviceAccount, oauthScope string) { + pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + user := reply.GetUsername() + scope := reply.GetOauthScope() + if user != serviceAccount { + grpclog.Fatalf("Got user name %q, want %q.", user, serviceAccount) + } + if !strings.Contains(oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) + } +} + +func getServiceAccountJSONKey(keyFile string) []byte { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + grpclog.Fatalf("Failed to read the service account key file: %v", err) + } + return jsonKey +} + +// DoServiceAccountCreds performs a unary RPC with service account auth. +func DoServiceAccountCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { + pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) + } +} + +// DoJWTTokenCreds performs a unary RPC with JWT token auth. +func DoJWTTokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile string) { + pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) + user := reply.GetUsername() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } +} + +// GetToken obtains an OAUTH token from the input. +func GetToken(serviceAccountKeyFile string, oauthScope string) *oauth2.Token { + jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) + config, err := google.JWTConfigFromJSON(jsonKey, oauthScope) + if err != nil { + grpclog.Fatalf("Failed to get the config: %v", err) + } + token, err := config.TokenSource(context.Background()).Token() + if err != nil { + grpclog.Fatalf("Failed to get the token: %v", err) + } + return token +} + +// DoOauth2TokenCreds performs a unary RPC with OAUTH2 token auth. 
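+// It checks the returned username against the service account JSON key and the returned
+// OAuth scope against oauthScope.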
+func DoOauth2TokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { + pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) + } +} + +// DoPerRPCCreds performs a unary RPC with per RPC OAUTH2 token. +func DoPerRPCCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { + jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) + pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + token := GetToken(serviceAccountKeyFile, oauthScope) + kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken} + ctx := metadata.NewContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}}) + reply, err := tc.UnaryCall(ctx, req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) + } +} + +var ( + testMetadata = metadata.MD{ + "key1": []string{"value1"}, + "key2": []string{"value2"}, + } +) + +// DoCancelAfterBegin cancels the RPC after metadata has been sent but before payloads are sent. +func DoCancelAfterBegin(tc testpb.TestServiceClient) { + ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata)) + stream, err := tc.StreamingInputCall(ctx) + if err != nil { + grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) + } + cancel() + _, err = stream.CloseAndRecv() + if grpc.Code(err) != codes.Canceled { + grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled) + } +} + +// DoCancelAfterFirstResponse cancels the RPC after receiving the first message from the server. 
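+// It then expects the next Recv on the stream to fail with codes.Canceled.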
+func DoCancelAfterFirstResponse(tc testpb.TestServiceClient) {
+ ctx, cancel := context.WithCancel(context.Background())
+ stream, err := tc.FullDuplexCall(ctx)
+ if err != nil {
+ grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
+ }
+ respParam := []*testpb.ResponseParameters{
+ {
+ Size: proto.Int32(31415),
+ },
+ }
+ pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182)
+ req := &testpb.StreamingOutputCallRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseParameters: respParam,
+ Payload: pl,
+ }
+ if err := stream.Send(req); err != nil {
+ grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
+ }
+ if _, err := stream.Recv(); err != nil {
+ grpclog.Fatalf("%v.Recv() = %v", stream, err)
+ }
+ cancel()
+ if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
+ grpclog.Fatalf("%v completed with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
+ }
+}
+
+type testServer struct {
+}
+
+// NewTestServer creates a test server for test service.
+func NewTestServer() testpb.TestServiceServer {
+ return &testServer{}
+}
+
+func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
+ return new(testpb.Empty), nil
+}
+
+func serverNewPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) {
+ if size < 0 {
+ return nil, fmt.Errorf("requested a response with invalid length %d", size)
+ }
+ body := make([]byte, size)
+ switch t {
+ case testpb.PayloadType_COMPRESSABLE:
+ case testpb.PayloadType_UNCOMPRESSABLE:
+ return nil, fmt.Errorf("payloadType UNCOMPRESSABLE is not supported")
+ default:
+ return nil, fmt.Errorf("unsupported payload type: %d", t)
+ }
+ return &testpb.Payload{
+ Type: t.Enum(),
+ Body: body,
+ }, nil
+}
+
+func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+ pl, err := serverNewPayload(in.GetResponseType(), in.GetResponseSize())
+ if err != nil {
+ return nil, err
+ }
+ return &testpb.SimpleResponse{
+ Payload: pl,
+ }, nil
+}
+
+func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error {
+ cs := args.GetResponseParameters()
+ for _, c := range cs {
+ if us := c.GetIntervalUs(); us > 0 {
+ time.Sleep(time.Duration(us) * time.Microsecond)
+ }
+ pl, err := serverNewPayload(args.GetResponseType(), c.GetSize())
+ if err != nil {
+ return err
+ }
+ if err := stream.Send(&testpb.StreamingOutputCallResponse{
+ Payload: pl,
+ }); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
+ var sum int
+ for {
+ in, err := stream.Recv()
+ if err == io.EOF {
+ return stream.SendAndClose(&testpb.StreamingInputCallResponse{
+ AggregatedPayloadSize: proto.Int32(int32(sum)),
+ })
+ }
+ if err != nil {
+ return err
+ }
+ p := in.GetPayload().GetBody()
+ sum += len(p)
+ }
+}
+
+func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
+ for {
+ in, err := stream.Recv()
+ if err == io.EOF {
+ // read done.
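+ // The client has closed its send direction, so returning nil ends the RPC with an OK status.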
+ return nil + } + if err != nil { + return err + } + cs := in.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + pl, err := serverNewPayload(in.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: pl, + }); err != nil { + return err + } + } + } +} + +func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { + var msgBuf []*testpb.StreamingOutputCallRequest + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + break + } + if err != nil { + return err + } + msgBuf = append(msgBuf, in) + } + for _, m := range msgBuf { + cs := m.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + pl, err := serverNewPayload(m.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: pl, + }); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index adebc38f8ea..52070dbeca2 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -46,27 +46,16 @@ const ( binHdrSuffix = "-bin" ) -// grpc-http2 requires ASCII header key and value (more detail can be found in -// "Requests" subsection in go/grpc-http2). -func isASCII(s string) bool { - for _, c := range s { - if c > 127 { - return false - } - } - return true -} - // encodeKeyValue encodes key and value qualified for transmission via gRPC. // Transmitting binary headers violates HTTP/2 spec. // TODO(zhaoq): Maybe check if k is ASCII also. func encodeKeyValue(k, v string) (string, string) { - if isASCII(v) { - return k, v + k = strings.ToLower(k) + if strings.HasSuffix(k, binHdrSuffix) { + val := base64.StdEncoding.EncodeToString([]byte(v)) + v = string(val) } - key := strings.ToLower(k + binHdrSuffix) - val := base64.StdEncoding.EncodeToString([]byte(v)) - return key, string(val) + return k, v } // DecodeKeyValue returns the original key and value corresponding to the @@ -75,12 +64,11 @@ func DecodeKeyValue(k, v string) (string, string, error) { if !strings.HasSuffix(k, binHdrSuffix) { return k, v, nil } - key := k[:len(k)-len(binHdrSuffix)] val, err := base64.StdEncoding.DecodeString(v) if err != nil { return "", "", err } - return key, string(val), nil + return k, string(val), nil } // MD is a mapping from metadata keys to values. Users should use the following diff --git a/vendor/google.golang.org/grpc/metadata/metadata_test.go b/vendor/google.golang.org/grpc/metadata/metadata_test.go new file mode 100644 index 00000000000..02e6ba518ef --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata_test.go @@ -0,0 +1,107 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package metadata + +import ( + "reflect" + "testing" +) + +const binaryValue = string(128) + +func TestEncodeKeyValue(t *testing.T) { + for _, test := range []struct { + // input + kin string + vin string + // output + kout string + vout string + }{ + {"key", "abc", "key", "abc"}, + {"KEY", "abc", "key", "abc"}, + {"key-bin", "abc", "key-bin", "YWJj"}, + {"key-bin", binaryValue, "key-bin", "woA="}, + } { + k, v := encodeKeyValue(test.kin, test.vin) + if k != test.kout || !reflect.DeepEqual(v, test.vout) { + t.Fatalf("encodeKeyValue(%q, %q) = %q, %q, want %q, %q", test.kin, test.vin, k, v, test.kout, test.vout) + } + } +} + +func TestDecodeKeyValue(t *testing.T) { + for _, test := range []struct { + // input + kin string + vin string + // output + kout string + vout string + err error + }{ + {"a", "abc", "a", "abc", nil}, + {"key-bin", "Zm9vAGJhcg==", "key-bin", "foo\x00bar", nil}, + {"key-bin", "woA=", "key-bin", binaryValue, nil}, + } { + k, v, err := DecodeKeyValue(test.kin, test.vin) + if k != test.kout || !reflect.DeepEqual(v, test.vout) || !reflect.DeepEqual(err, test.err) { + t.Fatalf("DecodeKeyValue(%q, %q) = %q, %q, %v, want %q, %q, %v", test.kin, test.vin, k, v, err, test.kout, test.vout, test.err) + } + } +} + +func TestPairsMD(t *testing.T) { + for _, test := range []struct { + // input + kv []string + // output + md MD + size int + }{ + {[]string{}, MD{}, 0}, + {[]string{"k1", "v1", "k2-bin", binaryValue}, New(map[string]string{ + "k1": "v1", + "k2-bin": binaryValue, + }), 2}, + } { + md := Pairs(test.kv...) + if !reflect.DeepEqual(md, test.md) { + t.Fatalf("Pairs(%v) = %v, want %v", test.kv, md, test.md) + } + if md.Len() != test.size { + t.Fatalf("Pairs(%v) generates md of size %d, want %d", test.kv, md.Len(), test.size) + } + } +} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go new file mode 100644 index 00000000000..c2e0871e6f8 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package naming defines the naming API and related data structures for gRPC.
+// The interface is EXPERIMENTAL and may be subject to change.
+package naming
+
+// Operation defines the corresponding operations for a name resolution change.
+type Operation uint8
+
+const (
+ // Add indicates a new address is added.
+ Add Operation = iota
+ // Delete indicates an existing address is deleted.
+ Delete
+)
+
+// Update defines a name resolution update. Notice that it is not valid having both
+// empty string Addr and nil Metadata in an Update.
+type Update struct {
+ // Op indicates the operation of the update.
+ Op Operation
+ // Addr is the updated address. It is empty string if there is no address update.
+ Addr string
+ // Metadata is the updated metadata. It is nil if there is no metadata update.
+ // Metadata is not required for a custom naming implementation.
+ Metadata interface{}
+}
+
+// Resolver creates a Watcher for a target to track its resolution changes.
+type Resolver interface {
+ // Resolve creates a Watcher for target.
+ Resolve(target string) (Watcher, error)
+}
+
+// Watcher watches for the updates on the specified target.
+type Watcher interface {
+ // Next blocks until an update or error happens. It may return one or more
+ // updates. The first call should get the full set of the results. It should
+ // return an error if and only if Watcher cannot recover.
+ Next() ([]*Update, error)
+ // Close closes the Watcher.
+ Close()
+}
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
new file mode 100644
index 00000000000..bfa6205ba9e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker.go b/vendor/google.golang.org/grpc/picker.go deleted file mode 100644 index bc48573a419..00000000000 --- a/vendor/google.golang.org/grpc/picker.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc/transport" -) - -// Picker picks a Conn for RPC requests. -// This is EXPERIMENTAL and please do not implement your own Picker for now. -type Picker interface { - // Init does initial processing for the Picker, e.g., initiate some connections. - Init(cc *ClientConn) error - // Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC - // or some error happens. - Pick(ctx context.Context) (transport.ClientTransport, error) - // State returns the connectivity state of the underlying connections. - State() ConnectivityState - // WaitForStateChange blocks until the state changes to something other than - // the sourceState or timeout fires on cc. It returns false if timeout fires, - // and true otherwise. - WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool - // Close closes all the Conn's owned by this Picker. - Close() error -} - -// unicastPicker is the default Picker which is used when there is no custom Picker -// specified by users. It always picks the same Conn. -type unicastPicker struct { - conn *Conn -} - -func (p *unicastPicker) Init(cc *ClientConn) error { - c, err := NewConn(cc) - if err != nil { - return err - } - p.conn = c - return nil -} - -func (p *unicastPicker) Pick(ctx context.Context) (transport.ClientTransport, error) { - return p.conn.Wait(ctx) -} - -func (p *unicastPicker) State() ConnectivityState { - return p.conn.State() -} - -func (p *unicastPicker) WaitForStateChange(timeout time.Duration, sourceState ConnectivityState) bool { - return p.conn.WaitForStateChange(timeout, sourceState) -} - -func (p *unicastPicker) Close() error { - if p.conn != nil { - return p.conn.Close() - } - return nil -} diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md new file mode 100644 index 00000000000..04b6371afcb --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/README.md @@ -0,0 +1,18 @@ +# Reflection + +Package reflection implements server reflection service. + +The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: +```go +import "google.golang.org/grpc/reflection" + +s := grpc.NewServer() +pb.RegisterYourOwnServer(s, &server{}) + +// Register reflection service on gRPC server. 
+reflection.Register(s) + +s.Serve(lis) +``` diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go new file mode 100644 index 00000000000..da90479e716 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -0,0 +1,694 @@ +// Code generated by protoc-gen-go. +// source: reflection.proto +// DO NOT EDIT! + +/* +Package grpc_reflection_v1alpha is a generated protocol buffer package. + +It is generated from these files: + reflection.proto + +It has these top-level messages: + ServerReflectionRequest + ExtensionRequest + ServerReflectionResponse + FileDescriptorResponse + ExtensionNumberResponse + ListServiceResponse + ServiceResponse + ErrorResponse +*/ +package grpc_reflection_v1alpha + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + Host string `protobuf:"bytes,1,opt,name=host" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+ // + // Types that are valid to be assigned to MessageRequest: + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest{} } +func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) } +func (*ServerReflectionRequest) ProtoMessage() {} +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,oneof"` +} +type ServerReflectionRequest_FileContainingSymbol struct { + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,oneof"` +} +type ServerReflectionRequest_FileContainingExtension struct { + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,oneof"` +} +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,oneof"` +} +type ServerReflectionRequest_ListServices struct { + ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (m *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (m *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (m *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (m *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (m *ServerReflectionRequest) GetListServices() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ServerReflectionRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServerReflectionRequest_OneofMarshaler, _ServerReflectionRequest_OneofUnmarshaler, _ServerReflectionRequest_OneofSizer, []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } +} + +func _ServerReflectionRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServerReflectionRequest) + // message_request + switch x := m.MessageRequest.(type) { + case *ServerReflectionRequest_FileByFilename: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.FileByFilename) + case *ServerReflectionRequest_FileContainingSymbol: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.FileContainingSymbol) + case *ServerReflectionRequest_FileContainingExtension: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileContainingExtension); err != nil { + return err + } + case *ServerReflectionRequest_AllExtensionNumbersOfType: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.AllExtensionNumbersOfType) + case *ServerReflectionRequest_ListServices: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ListServices) + case nil: + default: + return fmt.Errorf("ServerReflectionRequest.MessageRequest has unexpected type %T", x) + } + return nil +} + +func _ServerReflectionRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServerReflectionRequest) + switch tag { + case 3: // message_request.file_by_filename + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_FileByFilename{x} + return true, err + case 4: // message_request.file_containing_symbol + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_FileContainingSymbol{x} + return true, err + case 5: // message_request.file_containing_extension + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ExtensionRequest) + err := b.DecodeMessage(msg) + m.MessageRequest = &ServerReflectionRequest_FileContainingExtension{msg} + return true, err + case 6: // message_request.all_extension_numbers_of_type + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_AllExtensionNumbersOfType{x} + return true, err + case 7: // message_request.list_services + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_ListServices{x} + return true, err + default: + return false, nil + } +} + +func _ServerReflectionRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServerReflectionRequest) + // message_request + switch x := m.MessageRequest.(type) { + case *ServerReflectionRequest_FileByFilename: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.FileByFilename))) + n += len(x.FileByFilename) + case 
*ServerReflectionRequest_FileContainingSymbol:
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.FileContainingSymbol)))
+ n += len(x.FileContainingSymbol)
+ case *ServerReflectionRequest_FileContainingExtension:
+ s := proto.Size(x.FileContainingExtension)
+ n += proto.SizeVarint(5<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *ServerReflectionRequest_AllExtensionNumbersOfType:
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.AllExtensionNumbersOfType)))
+ n += len(x.AllExtensionNumbersOfType)
+ case *ServerReflectionRequest_ListServices:
+ n += proto.SizeVarint(7<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.ListServices)))
+ n += len(x.ListServices)
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// The type name and extension number sent by the client when requesting
+// file_containing_extension.
+type ExtensionRequest struct {
+ // Fully-qualified type name. The format should be <package>.<type>.
+ ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType" json:"containing_type,omitempty"`
+ ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber" json:"extension_number,omitempty"`
+}
+
+func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} }
+func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) }
+func (*ExtensionRequest) ProtoMessage() {}
+func (*ExtensionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+// The message sent by the server to answer ServerReflectionInfo method.
+type ServerReflectionResponse struct {
+ ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost" json:"valid_host,omitempty"`
+ OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"`
+ // The server sets one of the following fields according to the message_request
+ // in the request.
+ // + // Types that are valid to be assigned to MessageResponse: + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionResponse{} } +func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) } +func (*ServerReflectionResponse) ProtoMessage() {} +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,oneof"` +} +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,oneof"` +} +type ServerReflectionResponse_ListServicesResponse struct { + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,oneof"` +} +type ServerReflectionResponse_ErrorResponse struct { + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {} +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if m != nil { + return m.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ServerReflectionResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServerReflectionResponse_OneofMarshaler, _ServerReflectionResponse_OneofUnmarshaler, _ServerReflectionResponse_OneofSizer, []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } +} + +func _ServerReflectionResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServerReflectionResponse) + // message_response + switch x := m.MessageResponse.(type) { + case *ServerReflectionResponse_FileDescriptorResponse: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileDescriptorResponse); err != nil { + return err + } + case *ServerReflectionResponse_AllExtensionNumbersResponse: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AllExtensionNumbersResponse); err != nil { + return err + } + case *ServerReflectionResponse_ListServicesResponse: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListServicesResponse); err != nil { + return err + } + case *ServerReflectionResponse_ErrorResponse: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ErrorResponse); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ServerReflectionResponse.MessageResponse has unexpected type %T", x) + } + return nil +} + +func _ServerReflectionResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServerReflectionResponse) + switch tag { + case 4: // message_response.file_descriptor_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileDescriptorResponse) + err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_FileDescriptorResponse{msg} + return true, err + case 5: // message_response.all_extension_numbers_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ExtensionNumberResponse) + err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_AllExtensionNumbersResponse{msg} + return true, err + case 6: // message_response.list_services_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListServiceResponse) + err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_ListServicesResponse{msg} + return true, err + case 7: // message_response.error_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ErrorResponse) + err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_ErrorResponse{msg} + return true, err + default: + return false, nil + } +} + +func _ServerReflectionResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServerReflectionResponse) + // message_response + switch x := m.MessageResponse.(type) { + case *ServerReflectionResponse_FileDescriptorResponse: + s := proto.Size(x.FileDescriptorResponse) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerReflectionResponse_AllExtensionNumbersResponse: + s := proto.Size(x.AllExtensionNumbersResponse) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + 
n += proto.SizeVarint(uint64(s)) + n += s + case *ServerReflectionResponse_ListServicesResponse: + s := proto.Size(x.ListServicesResponse) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerReflectionResponse_ErrorResponse: + s := proto.Size(x.ErrorResponse) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} } +func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorResponse) ProtoMessage() {} +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + // Full name of the base type, including the package name. The format + // is . + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,name=extension_number,json=extensionNumber" json:"extension_number,omitempty"` +} + +func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse{} } +func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionNumberResponse) ProtoMessage() {} +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service" json:"service,omitempty"` +} + +func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} } +func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceResponse) ProtoMessage() {} +func (*ListServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ListServiceResponse) GetService() []*ServiceResponse { + if m != nil { + return m.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + // Full name of a registered service, including its package name. The format + // is . 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *ServiceResponse) Reset() { *m = ServiceResponse{} } +func (m *ServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ServiceResponse) ProtoMessage() {} +func (*ServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage" json:"error_message,omitempty"` +} + +func (m *ErrorResponse) Reset() { *m = ErrorResponse{} } +func (m *ErrorResponse) String() string { return proto.CompactTextString(m) } +func (*ErrorResponse) ProtoMessage() {} +func (*ErrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func init() { + proto.RegisterType((*ServerReflectionRequest)(nil), "grpc.reflection.v1alpha.ServerReflectionRequest") + proto.RegisterType((*ExtensionRequest)(nil), "grpc.reflection.v1alpha.ExtensionRequest") + proto.RegisterType((*ServerReflectionResponse)(nil), "grpc.reflection.v1alpha.ServerReflectionResponse") + proto.RegisterType((*FileDescriptorResponse)(nil), "grpc.reflection.v1alpha.FileDescriptorResponse") + proto.RegisterType((*ExtensionNumberResponse)(nil), "grpc.reflection.v1alpha.ExtensionNumberResponse") + proto.RegisterType((*ListServiceResponse)(nil), "grpc.reflection.v1alpha.ListServiceResponse") + proto.RegisterType((*ServiceResponse)(nil), "grpc.reflection.v1alpha.ServiceResponse") + proto.RegisterType((*ErrorResponse)(nil), "grpc.reflection.v1alpha.ErrorResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for ServerReflection service + +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc *grpc.ClientConn +} + +func NewServerReflectionClient(cc *grpc.ClientConn) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := grpc.NewClientStream(ctx, &_ServerReflection_serviceDesc.Streams[0], c.cc, "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for ServerReflection service + +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) { + s.RegisterService(&_ServerReflection_serviceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _ServerReflection_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("reflection.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 646 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xd3, 0x4c, + 0x10, 0xfd, 0xd2, 0xe6, 0x47, 0x99, 0xfc, 0xf9, 0xdb, 0x86, 0xc4, 0x05, 0x15, 0x21, 0x43, 0x21, + 0x45, 0x28, 0xb4, 0x46, 0xe2, 0x01, 0x52, 0x40, 0x45, 0x2a, 0x2d, 0x72, 0xb8, 0x41, 0x5c, 0x58, + 0x8e, 0xb3, 0x4e, 0x0d, 0x8e, 0xd7, 0xec, 0xba, 0x81, 0x5c, 0xf1, 0x10, 0x3c, 0x14, 0xaf, 0xc4, + 0x25, 0xbb, 0xeb, 0x9f, 0x38, 0xae, 0x0d, 0xea, 0x95, 0xad, 0xb3, 0x33, 0x7b, 0x66, 0xe6, 0x9c, + 0x59, 0x50, 0x28, 0x76, 0x3c, 0x6c, 0x87, 0x2e, 0xf1, 0xc7, 0x01, 0x25, 0x21, 0x41, 0xc3, 0x05, + 0x0d, 0xec, 0x71, 0x06, 0x5e, 0x9d, 0x58, 0x5e, 0x70, 0x65, 0x69, 0xbf, 0x77, 0x60, 0x38, 0xc5, + 0x74, 0x85, 0xa9, 0x91, 0x1e, 0x1a, 0xf8, 0xeb, 0x35, 0x66, 0x21, 0x42, 0x50, 0xbd, 0x22, 0x2c, + 0x54, 0x2b, 0x0f, 0x2a, 0xa3, 0xa6, 0x21, 0xff, 0xd1, 0x53, 0x50, 0x1c, 0xd7, 0xc3, 0xe6, 0x6c, + 0x6d, 0x8a, 0xaf, 0x6f, 0x2d, 0xb1, 0xba, 0x2b, 0xce, 0xcf, 0xfe, 0x33, 0xba, 0x02, 
0x99, 0xac, + 0xdf, 0xc4, 0x38, 0x7a, 0x09, 0x03, 0x19, 0x6b, 0x13, 0x3f, 0xb4, 0x5c, 0xdf, 0xf5, 0x17, 0x26, + 0x5b, 0x2f, 0x67, 0xc4, 0x53, 0xab, 0x71, 0x46, 0x5f, 0x9c, 0x9f, 0xa6, 0xc7, 0x53, 0x79, 0x8a, + 0x16, 0xb0, 0x9f, 0xcf, 0xc3, 0xdf, 0x43, 0xec, 0x33, 0x5e, 0x9b, 0x5a, 0xe3, 0xa9, 0x2d, 0xfd, + 0x68, 0x5c, 0xd2, 0xd0, 0xf8, 0x75, 0x12, 0x19, 0x77, 0xc1, 0x59, 0x86, 0xdb, 0x2c, 0x69, 0x04, + 0x9a, 0xc0, 0x81, 0xe5, 0x79, 0x9b, 0xcb, 0x4d, 0xff, 0x7a, 0x39, 0xc3, 0x94, 0x99, 0xc4, 0x31, + 0xc3, 0x75, 0x80, 0xd5, 0x7a, 0x5c, 0xe7, 0x3e, 0x0f, 0x4b, 0xd3, 0x2e, 0xa2, 0xa0, 0x4b, 0xe7, + 0x03, 0x0f, 0x41, 0x87, 0xd0, 0xf1, 0x5c, 0x16, 0x9a, 0x8c, 0x0f, 0xd1, 0xb5, 0x31, 0x53, 0x1b, + 0x71, 0x4e, 0x5b, 0xc0, 0xd3, 0x18, 0x9d, 0xfc, 0x0f, 0xbd, 0x25, 0x66, 0xcc, 0x5a, 0x60, 0x93, + 0x46, 0x85, 0x69, 0x0e, 0x28, 0xf9, 0x62, 0xd1, 0x13, 0xe8, 0x65, 0xba, 0x96, 0x35, 0x44, 0xd3, + 0xef, 0x6e, 0x60, 0x49, 0x7b, 0x04, 0x4a, 0xbe, 0x6c, 0x75, 0x87, 0x47, 0xd6, 0x8c, 0x1e, 0xde, + 0x2e, 0x54, 0xfb, 0x55, 0x05, 0xf5, 0xa6, 0xc4, 0x2c, 0x20, 0x3e, 0xc3, 0xe8, 0x00, 0x60, 0x65, + 0x79, 0xee, 0xdc, 0xcc, 0x28, 0xdd, 0x94, 0xc8, 0x99, 0x90, 0xfb, 0x13, 0x28, 0x84, 0xba, 0x0b, + 0xd7, 0xb7, 0xbc, 0xa4, 0x6e, 0x49, 0xd3, 0xd2, 0x8f, 0x4b, 0x15, 0x28, 0xb1, 0x93, 0xd1, 0x4b, + 0x6e, 0x4a, 0x9a, 0xfd, 0x02, 0xaa, 0xd4, 0x79, 0x8e, 0x99, 0x4d, 0xdd, 0x20, 0x24, 0x94, 0x73, + 0x44, 0x75, 0x49, 0x87, 0xb4, 0xf4, 0xe7, 0xa5, 0x24, 0xc2, 0x64, 0xaf, 0xd2, 0xbc, 0xa4, 0x1d, + 0x3e, 0x76, 0x69, 0xb9, 0x9b, 0x27, 0xe8, 0x1b, 0xdc, 0x2f, 0xd6, 0x3a, 0xa5, 0xac, 0xfd, 0xa3, + 0xaf, 0x9c, 0x01, 0x32, 0x9c, 0xf7, 0x0a, 0xec, 0x91, 0x12, 0xcf, 0x61, 0xb0, 0x65, 0x90, 0x0d, + 0x61, 0x5d, 0x12, 0x3e, 0x2b, 0x25, 0x3c, 0xdf, 0x18, 0x28, 0x43, 0xd6, 0xcf, 0xfa, 0x2a, 0x65, + 0xb9, 0x84, 0x2e, 0xa6, 0x34, 0x3b, 0xc1, 0x86, 0xbc, 0xfd, 0x71, 0x79, 0x3b, 0x22, 0x3c, 0x73, + 0x6f, 0x07, 0x67, 0x81, 0x09, 0x02, 0x65, 0x63, 0xd8, 0x08, 0xd3, 0xce, 0x61, 0x50, 0x3c, 0x77, + 0xa4, 0xc3, 0x9d, 0xbc, 0x94, 0xf2, 0xe1, 0xe1, 0x8e, 0xda, 0x1d, 0xb5, 0x8d, 0xbd, 0x6d, 0x51, + 0xde, 0x8b, 0x23, 0xed, 0x33, 0x0c, 0x4b, 0x46, 0x8a, 0x1e, 0x41, 0x77, 0x66, 0x31, 0x2c, 0x17, + 0xc0, 0x94, 0x6f, 0x4c, 0xe4, 0xcc, 0xb6, 0x40, 0x85, 0xff, 0x2f, 0xc4, 0xfb, 0x52, 0xbc, 0x03, + 0xbb, 0x45, 0x3b, 0xf0, 0x11, 0xf6, 0x0a, 0xa6, 0xc9, 0x1f, 0x80, 0x46, 0x2c, 0x8b, 0x2c, 0xb4, + 0xa5, 0x8f, 0xfe, 0xea, 0xea, 0x4c, 0xaa, 0x91, 0x24, 0x6a, 0x87, 0xd0, 0xcb, 0x5f, 0xcb, 0x1f, + 0xce, 0x4c, 0xd1, 0xf2, 0x5f, 0x9b, 0x42, 0x67, 0x6b, 0xe2, 0x62, 0xf3, 0x22, 0xc5, 0x6c, 0x32, + 0x8f, 0x42, 0x6b, 0x46, 0x53, 0x22, 0xa7, 0x1c, 0x40, 0x0f, 0x21, 0x12, 0xc4, 0x8c, 0x55, 0x90, + 0x6b, 0xc7, 0x27, 0x20, 0xc1, 0x77, 0x11, 0xa6, 0xff, 0xac, 0x80, 0x92, 0x5f, 0x37, 0xf4, 0x03, + 0xfa, 0x79, 0xec, 0xad, 0xef, 0x10, 0x74, 0xeb, 0x8d, 0xbd, 0x7b, 0x72, 0x8b, 0x8c, 0xa8, 0xab, + 0x51, 0xe5, 0xb8, 0x32, 0xab, 0x4b, 0xe9, 0x5f, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x3f, + 0x7b, 0x08, 0x87, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto new file mode 100644 index 00000000000..276ff0e255d --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto @@ -0,0 +1,151 @@ +// Copyright 2016, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Service exported by server reflection + +syntax = "proto3"; + +package grpc.reflection.v1alpha; + +service ServerReflection { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + rpc ServerReflectionInfo(stream ServerReflectionRequest) + returns (stream ServerReflectionResponse); +} + +// The message sent by the client when calling ServerReflectionInfo method. +message ServerReflectionRequest { + string host = 1; + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + oneof message_request { + // Find a proto file by the file name. + string file_by_filename = 3; + + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + string file_containing_symbol = 4; + + // Find the proto file which defines an extension extending the given + // message type with the given field number. + ExtensionRequest file_containing_extension = 5; + + // Finds the tag numbers used by all known extensions of extendee_type, and + // appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + string all_extension_numbers_of_type = 6; + + // List the full names of registered services. The content will not be + // checked. + string list_services = 7; + } +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +message ExtensionRequest { + // Fully-qualified type name. The format should be . 
+ string containing_type = 1; + int32 extension_number = 2; +} + +// The message sent by the server to answer ServerReflectionInfo method. +message ServerReflectionResponse { + string valid_host = 1; + ServerReflectionRequest original_request = 2; + // The server set one of the following fields accroding to the message_request + // in the request. + oneof message_response { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. As + // the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse file_descriptor_response = 4; + + // This message is used to answer all_extension_numbers_of_type requst. + ExtensionNumberResponse all_extension_numbers_response = 5; + + // This message is used to answer list_services request. + ListServiceResponse list_services_response = 6; + + // This message is used when an error occurs. + ErrorResponse error_response = 7; + } +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +message FileDescriptorResponse { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + repeated bytes file_descriptor_proto = 1; +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +message ExtensionNumberResponse { + // Full name of the base type, including the package name. The format + // is . + string base_type_name = 1; + repeated int32 extension_number = 2; +} + +// A list of ServiceResponse sent by the server answering list_services request. +message ListServiceResponse { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + repeated ServiceResponse service = 1; +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +message ServiceResponse { + // Full name of a registered service, including its package name. The format + // is . + string name = 1; +} + +// The error code and error message sent by the server when an error occurs. +message ErrorResponse { + // This field uses the error codes defined in grpc::StatusCode. + int32 error_code = 1; + string error_message = 2; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go new file mode 100644 index 00000000000..ac49de4aa26 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go @@ -0,0 +1,56 @@ +// Code generated by protoc-gen-go. +// source: proto2.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
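Each field of the message_request oneof defined in reflection.proto above becomes its own wrapper struct in the generated Go code, so a client selects a lookup mode by choosing which wrapper to assign to MessageRequest. A minimal sketch (the file name and symbol are example values, not requirements of the API, and the package name example is a placeholder):

package example

import (
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

// Three typical requests, one per lookup mode. Illustrative only.
var (
	byFilename = &rpb.ServerReflectionRequest{
		MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
			FileByFilename: "reflection.proto",
		},
	}
	bySymbol = &rpb.ServerReflectionRequest{
		MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{
			FileContainingSymbol: "grpc.reflection.v1alpha.ServerReflection",
		},
	}
	listServices = &rpb.ServerReflectionRequest{
		MessageRequest: &rpb.ServerReflectionRequest_ListServices{},
	}
)

The server answers each of these with the matching branch of message_response: a FileDescriptorResponse for the first two, a ListServiceResponse for the third, or an ErrorResponse on failure.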
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ToBeExtened struct { + Foo *int32 `protobuf:"varint,1,req,name=foo" json:"foo,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ToBeExtened) Reset() { *m = ToBeExtened{} } +func (m *ToBeExtened) String() string { return proto.CompactTextString(m) } +func (*ToBeExtened) ProtoMessage() {} +func (*ToBeExtened) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +var extRange_ToBeExtened = []proto.ExtensionRange{ + {10, 20}, +} + +func (*ToBeExtened) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ToBeExtened +} + +func (m *ToBeExtened) GetFoo() int32 { + if m != nil && m.Foo != nil { + return *m.Foo + } + return 0 +} + +func init() { + proto.RegisterType((*ToBeExtened)(nil), "grpc.testing.ToBeExtened") +} + +func init() { proto.RegisterFile("proto2.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 85 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x28, 0xca, 0x2f, + 0xc9, 0x37, 0xd2, 0x03, 0x53, 0x42, 0x3c, 0xe9, 0x45, 0x05, 0xc9, 0x7a, 0x25, 0xa9, 0xc5, 0x25, + 0x99, 0x79, 0xe9, 0x4a, 0xaa, 0x5c, 0xdc, 0x21, 0xf9, 0x4e, 0xa9, 0xae, 0x15, 0x25, 0xa9, 0x79, + 0xa9, 0x29, 0x42, 0x02, 0x5c, 0xcc, 0x69, 0xf9, 0xf9, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0xac, 0x41, + 0x20, 0xa6, 0x16, 0x0b, 0x07, 0x97, 0x80, 0x28, 0x20, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xed, 0xbc, + 0xc2, 0x43, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto new file mode 100644 index 00000000000..f79adc49ddb --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto @@ -0,0 +1,8 @@ +syntax = "proto2"; + +package grpc.testing; + +message ToBeExtened { + required int32 foo = 1; + extensions 10 to 20; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go new file mode 100644 index 00000000000..0120ca97246 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go @@ -0,0 +1,88 @@ +// Code generated by protoc-gen-go. +// source: proto2_ext.proto +// DO NOT EDIT! + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + proto2_ext.proto + proto2.proto + test.proto + +It has these top-level messages: + Extension + ToBeExtened + SearchResponse + SearchRequest +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Extension struct { + Baz *int32 `protobuf:"varint,1,opt,name=baz" json:"baz,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Extension) Reset() { *m = Extension{} } +func (m *Extension) String() string { return proto.CompactTextString(m) } +func (*Extension) ProtoMessage() {} +func (*Extension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Extension) GetBaz() int32 { + if m != nil && m.Baz != nil { + return *m.Baz + } + return 0 +} + +var E_Bar = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtened)(nil), + ExtensionType: (*int32)(nil), + Field: 13, + Name: "grpc.testing.bar", + Tag: "varint,13,opt,name=bar", +} + +var E_Baz = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtened)(nil), + ExtensionType: (*Extension)(nil), + Field: 17, + Name: "grpc.testing.baz", + Tag: "bytes,17,opt,name=baz", +} + +func init() { + proto.RegisterType((*Extension)(nil), "grpc.testing.Extension") + proto.RegisterExtension(E_Bar) + proto.RegisterExtension(E_Baz) +} + +func init() { proto.RegisterFile("proto2_ext.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 130 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x28, 0x28, 0xca, 0x2f, + 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0xd1, 0x03, 0x33, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, 0xf5, + 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0xf2, 0x10, 0x39, 0x25, 0x59, 0x2e, + 0x4e, 0xd7, 0x8a, 0x92, 0xd4, 0xbc, 0xe2, 0xcc, 0xfc, 0x3c, 0x21, 0x01, 0x2e, 0xe6, 0xa4, 0xc4, + 0x2a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xd6, 0x20, 0x10, 0xd3, 0x4a, 0x1b, 0x24, 0x52, 0x24, 0x24, + 0xa9, 0x87, 0x6c, 0x84, 0x5e, 0x48, 0xbe, 0x53, 0x2a, 0x58, 0x57, 0x6a, 0x8a, 0x04, 0x2f, 0x4c, + 0x71, 0x91, 0x95, 0x0b, 0x58, 0x3b, 0x3e, 0xc5, 0x82, 0x40, 0xc5, 0xdc, 0x46, 0xe2, 0xa8, 0x0a, + 0xe0, 0xf6, 0x83, 0xad, 0x04, 0x04, 0x00, 0x00, 0xff, 0xff, 0x59, 0xfa, 0x16, 0xbc, 0xc0, 0x00, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto new file mode 100644 index 00000000000..c2dd737ac52 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto @@ -0,0 +1,13 @@ +syntax = "proto2"; + +package grpc.testing; + +import "proto2.proto"; +extend ToBeExtened { + optional int32 bar = 13; + optional Extension baz = 17; +} + +message Extension { + optional int32 baz = 1; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go new file mode 100644 index 00000000000..add7abd09d6 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go @@ -0,0 +1,220 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
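The two extension descriptors registered above (field numbers 13 and 17) are what make the grpc.testing.ToBeExtened fixture useful for exercising extension lookups: the golang/protobuf runtime keeps a per-message extension registry, and the reflection service enumerates it when answering all_extension_numbers_of_type. A minimal sketch of that enumeration (the package name example is a placeholder):

package example

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/grpc/reflection/grpc_testing"
)

// printExtensionNumbers lists the field numbers of all extensions registered
// for ToBeExtened; with proto2_ext.pb.go loaded it prints 13 and 17
// (map iteration order is not deterministic).
func printExtensionNumbers() {
	for id := range proto.RegisteredExtensions(&pb.ToBeExtened{}) {
		fmt.Println(id)
	}
}

allExtensionNumbersForType in serverreflection.go, later in this diff, performs the same RegisteredExtensions walk on the server side.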
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type SearchResponse struct { + Results []*SearchResponse_Result `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` +} + +func (m *SearchResponse) Reset() { *m = SearchResponse{} } +func (m *SearchResponse) String() string { return proto.CompactTextString(m) } +func (*SearchResponse) ProtoMessage() {} +func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *SearchResponse) GetResults() []*SearchResponse_Result { + if m != nil { + return m.Results + } + return nil +} + +type SearchResponse_Result struct { + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Snippets []string `protobuf:"bytes,3,rep,name=snippets" json:"snippets,omitempty"` +} + +func (m *SearchResponse_Result) Reset() { *m = SearchResponse_Result{} } +func (m *SearchResponse_Result) String() string { return proto.CompactTextString(m) } +func (*SearchResponse_Result) ProtoMessage() {} +func (*SearchResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} } + +type SearchRequest struct { + Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` +} + +func (m *SearchRequest) Reset() { *m = SearchRequest{} } +func (m *SearchRequest) String() string { return proto.CompactTextString(m) } +func (*SearchRequest) ProtoMessage() {} +func (*SearchRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func init() { + proto.RegisterType((*SearchResponse)(nil), "grpc.testing.SearchResponse") + proto.RegisterType((*SearchResponse_Result)(nil), "grpc.testing.SearchResponse.Result") + proto.RegisterType((*SearchRequest)(nil), "grpc.testing.SearchRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for SearchService service + +type SearchServiceClient interface { + Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) + StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) +} + +type searchServiceClient struct { + cc *grpc.ClientConn +} + +func NewSearchServiceClient(cc *grpc.ClientConn) SearchServiceClient { + return &searchServiceClient{cc} +} + +func (c *searchServiceClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { + out := new(SearchResponse) + err := grpc.Invoke(ctx, "/grpc.testing.SearchService/Search", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *searchServiceClient) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SearchService_serviceDesc.Streams[0], c.cc, "/grpc.testing.SearchService/StreamingSearch", opts...) 
+ if err != nil { + return nil, err + } + x := &searchServiceStreamingSearchClient{stream} + return x, nil +} + +type SearchService_StreamingSearchClient interface { + Send(*SearchRequest) error + Recv() (*SearchResponse, error) + grpc.ClientStream +} + +type searchServiceStreamingSearchClient struct { + grpc.ClientStream +} + +func (x *searchServiceStreamingSearchClient) Send(m *SearchRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *searchServiceStreamingSearchClient) Recv() (*SearchResponse, error) { + m := new(SearchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for SearchService service + +type SearchServiceServer interface { + Search(context.Context, *SearchRequest) (*SearchResponse, error) + StreamingSearch(SearchService_StreamingSearchServer) error +} + +func RegisterSearchServiceServer(s *grpc.Server, srv SearchServiceServer) { + s.RegisterService(&_SearchService_serviceDesc, srv) +} + +func _SearchService_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SearchServiceServer).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.SearchService/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SearchServiceServer).Search(ctx, req.(*SearchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SearchService_StreamingSearch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SearchServiceServer).StreamingSearch(&searchServiceStreamingSearchServer{stream}) +} + +type SearchService_StreamingSearchServer interface { + Send(*SearchResponse) error + Recv() (*SearchRequest, error) + grpc.ServerStream +} + +type searchServiceStreamingSearchServer struct { + grpc.ServerStream +} + +func (x *searchServiceStreamingSearchServer) Send(m *SearchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *searchServiceStreamingSearchServer) Recv() (*SearchRequest, error) { + m := new(SearchRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _SearchService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.SearchService", + HandlerType: (*SearchServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Search", + Handler: _SearchService_Search_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingSearch", + Handler: _SearchService_StreamingSearch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: fileDescriptor2, +} + +func init() { proto.RegisterFile("test.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 227 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, + 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, 0xd6, 0x03, 0x09, 0x64, + 0xe6, 0xa5, 0x2b, 0xcd, 0x65, 0xe4, 0xe2, 0x0b, 0x4e, 0x4d, 0x2c, 0x4a, 0xce, 0x08, 0x4a, 0x2d, + 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x15, 0xb2, 0xe5, 0x62, 0x2f, 0x4a, 0x2d, 0x2e, 0xcd, 0x29, 0x29, + 0x96, 0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x52, 0xd6, 0x43, 0xd6, 0xa2, 0x87, 0xaa, 0x5c, 0x2f, + 0x08, 0xac, 0x36, 0x08, 0xa6, 0x47, 0xca, 0x87, 0x8b, 0x0d, 0x22, 0x24, 0x24, 0xc0, 
0xc5, 0x5c, + 0x5a, 0x94, 0x03, 0x34, 0x84, 0x51, 0x83, 0x33, 0x08, 0xc4, 0x14, 0x12, 0xe1, 0x62, 0x2d, 0xc9, + 0x2c, 0xc9, 0x49, 0x95, 0x60, 0x02, 0x8b, 0x41, 0x38, 0x42, 0x52, 0x5c, 0x1c, 0xc5, 0x79, 0x99, + 0x05, 0x05, 0xa9, 0x40, 0x1b, 0x99, 0x81, 0x36, 0x72, 0x06, 0xc1, 0xf9, 0x4a, 0xaa, 0x5c, 0xbc, + 0x30, 0xfb, 0x0a, 0x4b, 0x81, 0x0e, 0x00, 0x19, 0x01, 0x64, 0x14, 0x55, 0x42, 0x8d, 0x85, 0x70, + 0x8c, 0x96, 0x31, 0xc2, 0xd4, 0x05, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x0a, 0x39, 0x73, 0xb1, + 0x41, 0x04, 0x84, 0xa4, 0xb1, 0x3b, 0x1f, 0x6c, 0x9c, 0x94, 0x0c, 0x3e, 0xbf, 0x09, 0x05, 0x70, + 0xf1, 0x07, 0x97, 0x14, 0xa5, 0x26, 0xe6, 0x02, 0xe5, 0x28, 0x36, 0x4d, 0x83, 0xd1, 0x80, 0x31, + 0x89, 0x0d, 0x1c, 0x09, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x20, 0xd6, 0x09, 0xb8, 0x92, + 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto new file mode 100644 index 00000000000..d74d0fc1336 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package grpc.testing; + +message SearchResponse { + message Result { + string url = 1; + string title = 2; + repeated string snippets = 3; + } + repeated Result results = 1; +} + +message SearchRequest { + string query = 1; +} + +service SearchService { + rpc Search(SearchRequest) returns (SearchResponse); + rpc StreamingSearch(stream SearchRequest) returns (stream SearchResponse); +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go new file mode 100644 index 00000000000..c3327ac5def --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -0,0 +1,395 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +Package reflection implements server reflection service. 
+ +The service implemented is defined in: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: + import "google.golang.org/grpc/reflection" + + s := grpc.NewServer() + pb.RegisterYourOwnServer(s, &server{}) + + // Register reflection service on gRPC server. + reflection.Register(s) + + s.Serve(lis) + +*/ +package reflection // import "google.golang.org/grpc/reflection" + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +type serverReflectionServer struct { + s *grpc.Server + // TODO add more cache if necessary + serviceInfo map[string]*grpc.ServiceInfo // cache for s.GetServiceInfo() +} + +// Register registers the server reflection service on the given gRPC server. +func Register(s *grpc.Server) { + rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ + s: s, + }) +} + +// protoMessage is used for type assertion on proto messages. +// Generated proto message implements function Descriptor(), but Descriptor() +// is not part of interface proto.Message. This interface is needed to +// call Descriptor(). +type protoMessage interface { + Descriptor() ([]byte, []int) +} + +// fileDescForType gets the file descriptor for the given type. +// The given type should be a proto message. +func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + enc, _ := m.Descriptor() + + return s.decodeFileDesc(enc) +} + +// decodeFileDesc does decompression and unmarshalling on the given +// file descriptor byte slice. +func (s *serverReflectionServer) decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { + raw, err := decompress(enc) + if err != nil { + return nil, fmt.Errorf("failed to decompress enc: %v", err) + } + + fd := new(dpb.FileDescriptorProto) + if err := proto.Unmarshal(raw, fd); err != nil { + return nil, fmt.Errorf("bad descriptor: %v", err) + } + return fd, nil +} + +// decompress does gzip decompression. 
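Register above wires the service into a *grpc.Server; the other half is a client driving the ServerReflectionInfo bidirectional stream. A minimal sketch of listing a server's services over that stream (the address is a placeholder and error handling is reduced to log.Fatalf):

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func main() {
	// Dial a server that has called reflection.Register.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Open the bidirectional reflection stream and ask for the service list.
	stream, err := rpb.NewServerReflectionClient(conn).ServerReflectionInfo(context.Background())
	if err != nil {
		log.Fatalf("open stream: %v", err)
	}
	if err := stream.Send(&rpb.ServerReflectionRequest{
		MessageRequest: &rpb.ServerReflectionRequest_ListServices{},
	}); err != nil {
		log.Fatalf("send: %v", err)
	}
	r, err := stream.Recv()
	if err != nil {
		log.Fatalf("recv: %v", err)
	}
	for _, svc := range r.GetListServicesResponse().GetService() {
		fmt.Println(svc.Name)
	}
}

The end-to-end test further down in this diff (TestReflectionEnd2end with testListServices) exercises the same round trip against an in-process server.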
+func decompress(b []byte) ([]byte, error) { + r, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v\n", err) + } + out, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v\n", err) + } + return out, nil +} + +func (s *serverReflectionServer) typeForName(name string) (reflect.Type, error) { + pt := proto.MessageType(name) + if pt == nil { + return nil, fmt.Errorf("unknown type: %q", name) + } + st := pt.Elem() + + return st, nil +} + +func (s *serverReflectionServer) fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + + var extDesc *proto.ExtensionDesc + for id, desc := range proto.RegisteredExtensions(m) { + if id == ext { + extDesc = desc + break + } + } + + if extDesc == nil { + return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) + } + + extT := reflect.TypeOf(extDesc.ExtensionType).Elem() + + return s.fileDescForType(extT) +} + +func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + + exts := proto.RegisteredExtensions(m) + out := make([]int32, 0, len(exts)) + for id := range exts { + out = append(out, id) + } + return out, nil +} + +// fileDescEncodingByFilename finds the file descriptor for given filename, +// does marshalling on it and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingByFilename(name string) ([]byte, error) { + enc := proto.FileDescriptor(name) + if enc == nil { + return nil, fmt.Errorf("unknown file: %v", name) + } + fd, err := s.decodeFileDesc(enc) + if err != nil { + return nil, err + } + return proto.Marshal(fd) +} + +// serviceMetadataForSymbol finds the metadata for name in s.serviceInfo. +// name should be a service name or a method name. +func (s *serverReflectionServer) serviceMetadataForSymbol(name string) (interface{}, error) { + if s.serviceInfo == nil { + s.serviceInfo = s.s.GetServiceInfo() + } + + // Check if it's a service name. + if info, ok := s.serviceInfo[name]; ok { + return info.Metadata, nil + } + + // Check if it's a method name. + pos := strings.LastIndex(name, ".") + // Not a valid method name. + if pos == -1 { + return nil, fmt.Errorf("unknown symbol: %v", name) + } + + info, ok := s.serviceInfo[name[:pos]] + // Substring before last "." is not a service name. + if !ok { + return nil, fmt.Errorf("unknown symbol: %v", name) + } + + // Search the method name in info.Methods. + var found bool + for _, m := range info.Methods { + if m.Name == name[pos+1:] { + found = true + break + } + } + if found { + return info.Metadata, nil + } + + return nil, fmt.Errorf("unknown symbol: %v", name) +} + +// fileDescEncodingContainingSymbol finds the file descriptor containing the given symbol, +// does marshalling on it and returns the marshalled result. +// The given symbol can be a type, a service or a method. +func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) ([]byte, error) { + var ( + fd *dpb.FileDescriptorProto + ) + // Check if it's a type name. 
+ if st, err := s.typeForName(name); err == nil { + fd, err = s.fileDescForType(st) + if err != nil { + return nil, err + } + } else { // Check if it's a service name or a method name. + meta, err := s.serviceMetadataForSymbol(name) + + // Metadata not found. + if err != nil { + return nil, err + } + + // Metadata not valid. + enc, ok := meta.([]byte) + if !ok { + return nil, fmt.Errorf("invalid file descriptor for symbol: %v", name) + } + + fd, err = s.decodeFileDesc(enc) + if err != nil { + return nil, err + } + } + + return proto.Marshal(fd) +} + +// fileDescEncodingContainingExtension finds the file descriptor containing given extension, +// does marshalling on it and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32) ([]byte, error) { + st, err := s.typeForName(typeName) + if err != nil { + return nil, err + } + fd, err := s.fileDescContainingExtension(st, extNum) + if err != nil { + return nil, err + } + return proto.Marshal(fd) +} + +// allExtensionNumbersForTypeName returns all extension numbers for the given type. +func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { + st, err := s.typeForName(name) + if err != nil { + return nil, err + } + extNums, err := s.allExtensionNumbersForType(st) + if err != nil { + return nil, err + } + return extNums, nil +} + +// ServerReflectionInfo is the reflection service handler. +func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &rpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *rpb.ServerReflectionRequest_FileByFilename: + b, err := s.fileDescEncodingByFilename(req.FileByFilename) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.fileDescEncodingContainingExtension(typeName, extNum) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := 
s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *rpb.ServerReflectionRequest_ListServices: + if s.serviceInfo == nil { + s.serviceInfo = s.s.GetServiceInfo() + } + serviceResponses := make([]*rpb.ServiceResponse, 0, len(s.serviceInfo)) + for n := range s.serviceInfo { + serviceResponses = append(serviceResponses, &rpb.ServiceResponse{ + Name: n, + }) + } + out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &rpb.ListServiceResponse{ + Service: serviceResponses, + }, + } + default: + return grpc.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection_test.go b/vendor/google.golang.org/grpc/reflection/serverreflection_test.go new file mode 100644 index 00000000000..ca9610e263e --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection_test.go @@ -0,0 +1,492 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package reflection + +import ( + "fmt" + "net" + "reflect" + "sort" + "testing" + + "github.com/golang/protobuf/proto" + dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "golang.org/x/net/context" + "google.golang.org/grpc" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + pb "google.golang.org/grpc/reflection/grpc_testing" +) + +var ( + s = &serverReflectionServer{} + // fileDescriptor of each test proto file. 
+ fdTest *dpb.FileDescriptorProto + fdProto2 *dpb.FileDescriptorProto + fdProto2Ext *dpb.FileDescriptorProto + // fileDescriptor marshalled. + fdTestByte []byte + fdProto2Byte []byte + fdProto2ExtByte []byte +) + +func loadFileDesc(filename string) (*dpb.FileDescriptorProto, []byte) { + enc := proto.FileDescriptor(filename) + if enc == nil { + panic(fmt.Sprintf("failed to find fd for file: %v", filename)) + } + fd, err := s.decodeFileDesc(enc) + if err != nil { + panic(fmt.Sprintf("failed to decode enc: %v", err)) + } + b, err := proto.Marshal(fd) + if err != nil { + panic(fmt.Sprintf("failed to marshal fd: %v", err)) + } + return fd, b +} + +func init() { + fdTest, fdTestByte = loadFileDesc("test.proto") + fdProto2, fdProto2Byte = loadFileDesc("proto2.proto") + fdProto2Ext, fdProto2ExtByte = loadFileDesc("proto2_ext.proto") +} + +func TestFileDescForType(t *testing.T) { + for _, test := range []struct { + st reflect.Type + wantFd *dpb.FileDescriptorProto + }{ + {reflect.TypeOf(pb.SearchResponse_Result{}), fdTest}, + {reflect.TypeOf(pb.ToBeExtened{}), fdProto2}, + } { + fd, err := s.fileDescForType(test.st) + if err != nil || !reflect.DeepEqual(fd, test.wantFd) { + t.Errorf("fileDescForType(%q) = %q, %v, want %q, ", test.st, fd, err, test.wantFd) + } + } +} + +func TestTypeForName(t *testing.T) { + for _, test := range []struct { + name string + want reflect.Type + }{ + {"grpc.testing.SearchResponse", reflect.TypeOf(pb.SearchResponse{})}, + } { + r, err := s.typeForName(test.name) + if err != nil || r != test.want { + t.Errorf("typeForName(%q) = %q, %v, want %q, ", test.name, r, err, test.want) + } + } +} + +func TestTypeForNameNotFound(t *testing.T) { + for _, test := range []string{ + "grpc.testing.not_exiting", + } { + _, err := s.typeForName(test) + if err == nil { + t.Errorf("typeForName(%q) = _, %v, want _, ", test, err) + } + } +} + +func TestFileDescContainingExtension(t *testing.T) { + for _, test := range []struct { + st reflect.Type + extNum int32 + want *dpb.FileDescriptorProto + }{ + {reflect.TypeOf(pb.ToBeExtened{}), 17, fdProto2Ext}, + } { + fd, err := s.fileDescContainingExtension(test.st, test.extNum) + if err != nil || !reflect.DeepEqual(fd, test.want) { + t.Errorf("fileDescContainingExtension(%q) = %q, %v, want %q, ", test.st, fd, err, test.want) + } + } +} + +// intArray is used to sort []int32 +type intArray []int32 + +func (s intArray) Len() int { return len(s) } +func (s intArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s intArray) Less(i, j int) bool { return s[i] < s[j] } + +func TestAllExtensionNumbersForType(t *testing.T) { + for _, test := range []struct { + st reflect.Type + want []int32 + }{ + {reflect.TypeOf(pb.ToBeExtened{}), []int32{13, 17}}, + } { + r, err := s.allExtensionNumbersForType(test.st) + sort.Sort(intArray(r)) + if err != nil || !reflect.DeepEqual(r, test.want) { + t.Errorf("allExtensionNumbersForType(%q) = %v, %v, want %v, ", test.st, r, err, test.want) + } + } +} + +// Do end2end tests. + +type server struct{} + +func (s *server) Search(ctx context.Context, in *pb.SearchRequest) (*pb.SearchResponse, error) { + return &pb.SearchResponse{}, nil +} + +func (s *server) StreamingSearch(stream pb.SearchService_StreamingSearchServer) error { + return nil +} + +func TestReflectionEnd2end(t *testing.T) { + // Start server. + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to listen: %v", err) + } + s := grpc.NewServer() + pb.RegisterSearchServiceServer(s, &server{}) + // Register reflection service on s. 
+ Register(s) + go s.Serve(lis) + + // Create client. + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("cannot connect to server: %v", err) + } + defer conn.Close() + + c := rpb.NewServerReflectionClient(conn) + stream, err := c.ServerReflectionInfo(context.Background()) + + testFileByFilename(t, stream) + testFileByFilenameError(t, stream) + testFileContainingSymbol(t, stream) + testFileContainingSymbolError(t, stream) + testFileContainingExtension(t, stream) + testFileContainingExtensionError(t, stream) + testAllExtensionNumbersOfType(t, stream) + testAllExtensionNumbersOfTypeError(t, stream) + testListServices(t, stream) + + s.Stop() +} + +func testFileByFilename(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []struct { + filename string + want []byte + }{ + {"test.proto", fdTestByte}, + {"proto2.proto", fdProto2Byte}, + {"proto2_ext.proto", fdProto2ExtByte}, + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: test.filename, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_FileDescriptorResponse: + if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { + t.Errorf("FileByFilename(%v)\nreceived: %q,\nwant: %q", test.filename, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) + } + default: + t.Errorf("FileByFilename(%v) = %v, want type ", test.filename, r.MessageResponse) + } + } +} + +func testFileByFilenameError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []string{ + "test.poto", + "proo2.proto", + "proto2_et.proto", + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: test, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_ErrorResponse: + default: + t.Errorf("FileByFilename(%v) = %v, want type ", test, r.MessageResponse) + } + } +} + +func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []struct { + symbol string + want []byte + }{ + {"grpc.testing.SearchService", fdTestByte}, + {"grpc.testing.SearchService.Search", fdTestByte}, + {"grpc.testing.SearchService.StreamingSearch", fdTestByte}, + {"grpc.testing.SearchResponse", fdTestByte}, + {"grpc.testing.ToBeExtened", fdProto2Byte}, + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: test.symbol, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. 
+ t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_FileDescriptorResponse: + if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { + t.Errorf("FileContainingSymbol(%v)\nreceived: %q,\nwant: %q", test.symbol, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) + } + default: + t.Errorf("FileContainingSymbol(%v) = %v, want type ", test.symbol, r.MessageResponse) + } + } +} + +func testFileContainingSymbolError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []string{ + "grpc.testing.SerchService", + "grpc.testing.SearchService.SearchE", + "grpc.tesing.SearchResponse", + "gpc.testing.ToBeExtened", + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: test, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_ErrorResponse: + default: + t.Errorf("FileContainingSymbol(%v) = %v, want type ", test, r.MessageResponse) + } + } +} + +func testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []struct { + typeName string + extNum int32 + want []byte + }{ + {"grpc.testing.ToBeExtened", 17, fdProto2ExtByte}, + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &rpb.ExtensionRequest{ + ContainingType: test.typeName, + ExtensionNumber: test.extNum, + }, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_FileDescriptorResponse: + if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { + t.Errorf("FileContainingExtension(%v, %v)\nreceived: %q,\nwant: %q", test.typeName, test.extNum, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) + } + default: + t.Errorf("FileContainingExtension(%v, %v) = %v, want type ", test.typeName, test.extNum, r.MessageResponse) + } + } +} + +func testFileContainingExtensionError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []struct { + typeName string + extNum int32 + }{ + {"grpc.testing.ToBExtened", 17}, + {"grpc.testing.ToBeExtened", 15}, + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &rpb.ExtensionRequest{ + ContainingType: test.typeName, + ExtensionNumber: test.extNum, + }, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. 
+ t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_ErrorResponse: + default: + t.Errorf("FileContainingExtension(%v, %v) = %v, want type ", test.typeName, test.extNum, r.MessageResponse) + } + } +} + +func testAllExtensionNumbersOfType(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []struct { + typeName string + want []int32 + }{ + {"grpc.testing.ToBeExtened", []int32{13, 17}}, + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: test.typeName, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_AllExtensionNumbersResponse: + extNum := r.GetAllExtensionNumbersResponse().ExtensionNumber + sort.Sort(intArray(extNum)) + if r.GetAllExtensionNumbersResponse().BaseTypeName != test.typeName || + !reflect.DeepEqual(extNum, test.want) { + t.Errorf("AllExtensionNumbersOfType(%v)\nreceived: %v,\nwant: {%q %v}", r.GetAllExtensionNumbersResponse(), test.typeName, test.typeName, test.want) + } + default: + t.Errorf("AllExtensionNumbersOfType(%v) = %v, want type ", test.typeName, r.MessageResponse) + } + } +} + +func testAllExtensionNumbersOfTypeError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []string{ + "grpc.testing.ToBeExtenedE", + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: test, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_ErrorResponse: + default: + t.Errorf("AllExtensionNumbersOfType(%v) = %v, want type ", test, r.MessageResponse) + } + } +} + +func testListServices(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_ListServices{}, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_ListServicesResponse: + services := r.GetListServicesResponse().Service + want := []string{"grpc.testing.SearchService", "grpc.reflection.v1alpha.ServerReflection"} + // Compare service names in response with want. 
+ if len(services) != len(want) { + t.Errorf("= %v, want service names: %v", services, want) + } + m := make(map[string]int) + for _, e := range services { + m[e.Name]++ + } + for _, e := range want { + if m[e] > 0 { + m[e]-- + continue + } + t.Errorf("ListService\nreceived: %v,\nwant: %q", services, want) + } + default: + t.Errorf("ListServices = %v, want type ", r.MessageResponse) + } +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 46a6801b075..d628717560a 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -34,13 +34,14 @@ package grpc import ( + "bytes" + "compress/gzip" "encoding/binary" "fmt" "io" + "io/ioutil" "math" - "math/rand" "os" - "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" @@ -60,7 +61,7 @@ type Codec interface { String() string } -// protoCodec is a Codec implemetation with protobuf. It is the default codec for gRPC. +// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC. type protoCodec struct{} func (protoCodec) Marshal(v interface{}) ([]byte, error) { @@ -75,6 +76,73 @@ func (protoCodec) String() string { return "proto" } +// Compressor defines the interface gRPC uses to compress a message. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +func NewGZIPCompressor() Compressor { + return &gzipCompressor{} +} + +type gzipCompressor struct { +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := gzip.NewWriter(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + z, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + defer z.Close() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + traceInfo traceInfo // in trace.go +} + +var defaultCallInfo = callInfo{failFast: true} + // CallOption configures a Call before it starts or extracts information from // a Call after it completes. type CallOption interface { @@ -113,41 +181,67 @@ func Trailer(md *metadata.MD) CallOption { }) } +// FailFast configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If failfast is true, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will retry +// the call if it fails due to a transient error. 
Please refer to +// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md +func FailFast(failFast bool) CallOption { + return beforeCall(func(c *callInfo) error { + c.failFast = failFast + return nil + }) +} + // The format of the payload: compressed or not? type payloadFormat uint8 const ( compressionNone payloadFormat = iota // no compression - compressionFlate - // More formats + compressionMade ) -// parser reads complelete gRPC messages from the underlying reader. +// parser reads complete gRPC messages from the underlying reader. type parser struct { - s io.Reader -} + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader -// recvMsg is to read a complete gRPC message from the stream. It is blocking if -// the message has not been complete yet. It returns the message and its type, -// EOF is returned with nil msg and 0 pf if the entire stream is done. Other -// non-nil error is returned if something is wrong on reading. -func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { // The header of a gRPC message. Find more detail // at http://www.grpc.io/docs/guides/wire.html. - var buf [5]byte + header [5]byte +} - if _, err := io.ReadFull(p.s, buf[:]); err != nil { +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { + if _, err := io.ReadFull(p.r, p.header[:]); err != nil { return 0, nil, err } - pf = payloadFormat(buf[0]) - length := binary.BigEndian.Uint32(buf[1:]) + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { return pf, nil, nil } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: msg = make([]byte, int(length)) - if _, err := io.ReadFull(p.s, msg); err != nil { + if _, err := io.ReadFull(p.r, msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } @@ -158,7 +252,7 @@ func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { // encode serializes msg and prepends the message header. If msg is nil, it // generates the message header of 0 message length. 
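For orientation, the wire framing that recvMsg and encode above work with is a five-byte prefix (one payload-format flag byte followed by a big-endian uint32 length) ahead of the payload. A minimal, illustrative Go sketch (not part of the patch; the frame helper and its names are placeholders):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // frame builds the 5-byte gRPC message prefix: 1 payload-format flag byte
    // followed by a big-endian uint32 length, then the payload itself.
    func frame(b []byte, compressed bool) []byte {
        buf := make([]byte, 5+len(b))
        if compressed {
            buf[0] = 1 // compressionMade
        } else {
            buf[0] = 0 // compressionNone
        }
        binary.BigEndian.PutUint32(buf[1:5], uint32(len(b)))
        copy(buf[5:], b)
        return buf
    }

    func main() {
        fmt.Printf("% x\n", frame([]byte("abc"), false)) // 00 00 00 00 03 61 62 63
    }
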
-func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { +func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) { var b []byte var length uint if msg != nil { @@ -168,6 +262,12 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { if err != nil { return nil, err } + if cp != nil { + if err := cp.Do(cbuf, b); err != nil { + return nil, err + } + b = cbuf.Bytes() + } length = uint(len(b)) } if length > math.MaxUint32 { @@ -182,7 +282,11 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { var buf = make([]byte, payloadLen+sizeLen+len(b)) // Write payload format - buf[0] = byte(pf) + if cp == nil { + buf[0] = byte(compressionNone) + } else { + buf[0] = byte(compressionMade) + } // Write length of b into buf binary.BigEndian.PutUint32(buf[1:], uint32(length)) // Copy encoded msg to buf @@ -191,22 +295,35 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { return buf, nil } -func recv(p *parser, c Codec, m interface{}) error { +func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { + switch pf { + case compressionNone: + case compressionMade: + if dc == nil || recvCompress != dc.Type() { + return transport.StreamErrorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return transport.StreamErrorf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}) error { pf, d, err := p.recvMsg() if err != nil { return err } - switch pf { - case compressionNone: - if err := c.Unmarshal(d, m); err != nil { - if rErr, ok := err.(rpcError); ok { - return rErr - } else { - return Errorf(codes.Internal, "grpc: %v", err) - } + if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { + return err + } + if pf == compressionMade { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } - default: - return Errorf(codes.Internal, "gprc: compression is not supported yet.") + } + if err := c.Unmarshal(d, m); err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) } return nil } @@ -217,8 +334,8 @@ type rpcError struct { desc string } -func (e rpcError) Error() string { - return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc) +func (e *rpcError) Error() string { + return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc) } // Code returns the error code for err if it was produced by the rpc system. @@ -227,7 +344,7 @@ func Code(err error) codes.Code { if err == nil { return codes.OK } - if e, ok := err.(rpcError); ok { + if e, ok := err.(*rpcError); ok { return e.code } return codes.Unknown @@ -239,7 +356,7 @@ func ErrorDesc(err error) string { if err == nil { return "" } - if e, ok := err.(rpcError); ok { + if e, ok := err.(*rpcError); ok { return e.desc } return err.Error() @@ -251,7 +368,7 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { if c == codes.OK { return nil } - return rpcError{ + return &rpcError{ code: c, desc: fmt.Sprintf(format, a...), } @@ -260,18 +377,37 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into a rpcError. 
func toRPCErr(err error) error { switch e := err.(type) { - case rpcError: + case *rpcError: return err case transport.StreamError: - return rpcError{ + return &rpcError{ code: e.Code, desc: e.Desc, } case transport.ConnectionError: - return rpcError{ + return &rpcError{ code: codes.Internal, desc: e.Desc, } + default: + switch err { + case context.DeadlineExceeded: + return &rpcError{ + code: codes.DeadlineExceeded, + desc: err.Error(), + } + case context.Canceled: + return &rpcError{ + code: codes.Canceled, + desc: err.Error(), + } + case ErrClientConnClosing: + return &rpcError{ + code: codes.FailedPrecondition, + desc: err.Error(), + } + } + } return Errorf(codes.Unknown, "%v", err) } @@ -304,34 +440,10 @@ func convertCode(err error) codes.Code { return codes.Unknown } -const ( - // how long to wait after the first failure before retrying - baseDelay = 1.0 * time.Second - // upper bound of backoff delay - maxDelay = 120 * time.Second - // backoff increases by this factor on each retry - backoffFactor = 1.6 - // backoff is randomized downwards by this factor - backoffJitter = 0.2 -) - -func backoff(retries int) (t time.Duration) { - if retries == 0 { - return baseDelay - } - backoff, max := float64(baseDelay), float64(maxDelay) - for backoff < max && retries > 0 { - backoff *= backoffFactor - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + backoffJitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) -} +// SupportPackageIsVersion3 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the grpc package. +// +// This constant may be renamed in the future if a change in the generated code +// requires a synchronised update of grpc-go and protoc-gen-go. This constant +// should not be referenced from any other code. +const SupportPackageIsVersion3 = true diff --git a/vendor/google.golang.org/grpc/rpc_util_test.go b/vendor/google.golang.org/grpc/rpc_util_test.go new file mode 100644 index 00000000000..5a802d65137 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util_test.go @@ -0,0 +1,233 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + perfpb "google.golang.org/grpc/test/codec_perf" + "google.golang.org/grpc/transport" +) + +func TestSimpleParsing(t *testing.T) { + bigMsg := bytes.Repeat([]byte{'x'}, 1<<24) + for _, test := range []struct { + // input + p []byte + // outputs + err error + b []byte + pt payloadFormat + }{ + {nil, io.EOF, nil, compressionNone}, + {[]byte{0, 0, 0, 0, 0}, nil, nil, compressionNone}, + {[]byte{0, 0, 0, 0, 1, 'a'}, nil, []byte{'a'}, compressionNone}, + {[]byte{1, 0}, io.ErrUnexpectedEOF, nil, compressionNone}, + {[]byte{0, 0, 0, 0, 10, 'a'}, io.ErrUnexpectedEOF, nil, compressionNone}, + // Check that messages with length >= 2^24 are parsed. + {append([]byte{0, 1, 0, 0, 0}, bigMsg...), nil, bigMsg, compressionNone}, + } { + buf := bytes.NewReader(test.p) + parser := &parser{r: buf} + pt, b, err := parser.recvMsg() + if err != test.err || !bytes.Equal(b, test.b) || pt != test.pt { + t.Fatalf("parser{%v}.recvMsg() = %v, %v, %v\nwant %v, %v, %v", test.p, pt, b, err, test.pt, test.b, test.err) + } + } +} + +func TestMultipleParsing(t *testing.T) { + // Set a byte stream consists of 3 messages with their headers. 
+ p := []byte{0, 0, 0, 0, 1, 'a', 0, 0, 0, 0, 2, 'b', 'c', 0, 0, 0, 0, 1, 'd'} + b := bytes.NewReader(p) + parser := &parser{r: b} + + wantRecvs := []struct { + pt payloadFormat + data []byte + }{ + {compressionNone, []byte("a")}, + {compressionNone, []byte("bc")}, + {compressionNone, []byte("d")}, + } + for i, want := range wantRecvs { + pt, data, err := parser.recvMsg() + if err != nil || pt != want.pt || !reflect.DeepEqual(data, want.data) { + t.Fatalf("after %d calls, parser{%v}.recvMsg() = %v, %v, %v\nwant %v, %v, ", + i, p, pt, data, err, want.pt, want.data) + } + } + + pt, data, err := parser.recvMsg() + if err != io.EOF { + t.Fatalf("after %d recvMsgs calls, parser{%v}.recvMsg() = %v, %v, %v\nwant _, _, %v", + len(wantRecvs), p, pt, data, err, io.EOF) + } +} + +func TestEncode(t *testing.T) { + for _, test := range []struct { + // input + msg proto.Message + cp Compressor + // outputs + b []byte + err error + }{ + {nil, nil, []byte{0, 0, 0, 0, 0}, nil}, + } { + b, err := encode(protoCodec{}, test.msg, nil, nil) + if err != test.err || !bytes.Equal(b, test.b) { + t.Fatalf("encode(_, _, %v, _) = %v, %v\nwant %v, %v", test.cp, b, err, test.b, test.err) + } + } +} + +func TestCompress(t *testing.T) { + for _, test := range []struct { + // input + data []byte + cp Compressor + dc Decompressor + // outputs + err error + }{ + {make([]byte, 1024), &gzipCompressor{}, &gzipDecompressor{}, nil}, + } { + b := new(bytes.Buffer) + if err := test.cp.Do(b, test.data); err != test.err { + t.Fatalf("Compressor.Do(_, %v) = %v, want %v", test.data, err, test.err) + } + if b.Len() >= len(test.data) { + t.Fatalf("The compressor fails to compress data.") + } + if p, err := test.dc.Do(b); err != nil || !bytes.Equal(test.data, p) { + t.Fatalf("Decompressor.Do(%v) = %v, %v, want %v, ", b, p, err, test.data) + } + } +} + +func TestToRPCErr(t *testing.T) { + for _, test := range []struct { + // input + errIn error + // outputs + errOut *rpcError + }{ + {transport.StreamErrorf(codes.Unknown, ""), Errorf(codes.Unknown, "").(*rpcError)}, + {transport.ErrConnClosing, Errorf(codes.Internal, transport.ErrConnClosing.Desc).(*rpcError)}, + } { + err := toRPCErr(test.errIn) + rpcErr, ok := err.(*rpcError) + if !ok { + t.Fatalf("toRPCErr{%v} returned type %T, want %T", test.errIn, err, rpcError{}) + } + if *rpcErr != *test.errOut { + t.Fatalf("toRPCErr{%v} = %v \nwant %v", test.errIn, err, test.errOut) + } + } +} + +func TestContextErr(t *testing.T) { + for _, test := range []struct { + // input + errIn error + // outputs + errOut transport.StreamError + }{ + {context.DeadlineExceeded, transport.StreamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded)}, + {context.Canceled, transport.StreamErrorf(codes.Canceled, "%v", context.Canceled)}, + } { + err := transport.ContextErr(test.errIn) + if err != test.errOut { + t.Fatalf("ContextErr{%v} = %v \nwant %v", test.errIn, err, test.errOut) + } + } +} + +func TestErrorsWithSameParameters(t *testing.T) { + const description = "some description" + e1 := Errorf(codes.AlreadyExists, description) + e2 := Errorf(codes.AlreadyExists, description) + if e1 == e2 { + t.Fatalf("Error interfaces should not be considered equal - e1: %p - %v e2: %p - %v", e1, e1, e2, e2) + } + if Code(e1) != Code(e2) || ErrorDesc(e1) != ErrorDesc(e2) { + t.Fatalf("Expected errors to have same code and description - e1: %p - %v e2: %p - %v", e1, e1, e2, e2) + } +} + +// bmEncode benchmarks encoding a Protocol Buffer message containing mSize +// bytes. 
+func bmEncode(b *testing.B, mSize int) { + msg := &perfpb.Buffer{Body: make([]byte, mSize)} + encoded, _ := encode(protoCodec{}, msg, nil, nil) + encodedSz := int64(len(encoded)) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + encode(protoCodec{}, msg, nil, nil) + } + b.SetBytes(encodedSz) +} + +func BenchmarkEncode1B(b *testing.B) { + bmEncode(b, 1) +} + +func BenchmarkEncode1KiB(b *testing.B) { + bmEncode(b, 1024) +} + +func BenchmarkEncode8KiB(b *testing.B) { + bmEncode(b, 8*1024) +} + +func BenchmarkEncode64KiB(b *testing.B) { + bmEncode(b, 64*1024) +} + +func BenchmarkEncode512KiB(b *testing.B) { + bmEncode(b, 512*1024) +} + +func BenchmarkEncode1MiB(b *testing.B) { + bmEncode(b, 1024*1024) +} diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 487a75c5eea..a2b2b94d48e 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -34,10 +34,12 @@ package grpc import ( + "bytes" "errors" "fmt" "io" "net" + "net/http" "reflect" "runtime" "strings" @@ -45,15 +47,17 @@ import ( "time" "golang.org/x/net/context" + "golang.org/x/net/http2" "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -69,6 +73,7 @@ type ServiceDesc struct { HandlerType interface{} Methods []MethodDesc Streams []StreamDesc + Metadata interface{} } // service consists of the information of the server serving this service and @@ -77,22 +82,29 @@ type service struct { server interface{} // the server for service methods md map[string]*MethodDesc sd map[string]*StreamDesc + mdata interface{} } // Server is a gRPC server to serve RPC requests. type Server struct { - opts options - mu sync.Mutex + opts options + + mu sync.Mutex // guards following lis map[net.Listener]bool - conns map[transport.ServerTransport]bool + conns map[io.Closer]bool m map[string]*service // service name -> service info events trace.EventLog } type options struct { - creds credentials.Credentials + creds credentials.TransportCredentials codec Codec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor maxConcurrentStreams uint32 + useHandlerImpl bool // use http.Handler-based server } // A ServerOption sets options. @@ -105,6 +117,20 @@ func CustomCodec(codec Codec) ServerOption { } } +// RPCCompressor returns a ServerOption that sets a compressor for outbound message. +func RPCCompressor(cp Compressor) ServerOption { + return func(o *options) { + o.cp = cp + } +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound message. +func RPCDecompressor(dc Decompressor) ServerOption { + return func(o *options) { + o.dc = dc + } +} + // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. 
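The new RPCCompressor and RPCDecompressor options introduced above are ordinary ServerOptions, so enabling gzip on a server amounts to passing them to NewServer alongside any other options such as MaxConcurrentStreams. A minimal sketch under that assumption (the listen address and the commented-out service registration are placeholders):

    package main

    import (
        "net"

        "google.golang.org/grpc"
    )

    func main() {
        lis, err := net.Listen("tcp", "localhost:0") // placeholder address
        if err != nil {
            panic(err)
        }
        // Enable gzip for outbound and inbound messages via the new options.
        s := grpc.NewServer(
            grpc.RPCCompressor(grpc.NewGZIPCompressor()),
            grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
            grpc.MaxConcurrentStreams(100),
        )
        // pb.RegisterSearchServiceServer(s, &server{}) // register services here
        s.Serve(lis)
    }
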
func MaxConcurrentStreams(n uint32) ServerOption { @@ -114,12 +140,35 @@ func MaxConcurrentStreams(n uint32) ServerOption { } // Creds returns a ServerOption that sets credentials for server connections. -func Creds(c credentials.Credentials) ServerOption { +func Creds(c credentials.TransportCredentials) ServerOption { return func(o *options) { o.creds = c } } +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return func(o *options) { + if o.unaryInt != nil { + panic("The unary server interceptor has been set.") + } + o.unaryInt = i + } +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return func(o *options) { + if o.streamInt != nil { + panic("The stream server interceptor has been set.") + } + o.streamInt = i + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -134,7 +183,7 @@ func NewServer(opt ...ServerOption) *Server { s := &Server{ lis: make(map[net.Listener]bool), opts: opts, - conns: make(map[transport.ServerTransport]bool), + conns: make(map[io.Closer]bool), m: make(map[string]*service), } if EnableTracing { @@ -183,6 +232,7 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { server: ss, md: make(map[string]*MethodDesc), sd: make(map[string]*StreamDesc), + mdata: sd.Metadata, } for i := range sd.Methods { d := &sd.Methods[i] @@ -195,100 +245,250 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { s.m[sd.ServiceName] = srv } +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]*ServiceInfo { + ret := make(map[string]*ServiceInfo) + for n, srv := range s.m { + methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) + for m := range srv.md { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.sd { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = &ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + var ( // ErrServerStopped indicates that the operation is now illegal because of // the server being stopped. 
ErrServerStopped = errors.New("grpc: the server has been stopped") ) +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if s.opts.creds == nil { + return rawConn, nil, nil + } + return s.opts.creds.ServerHandshake(rawConn) +} + // Serve accepts incoming connections on the listener lis, creating a new // ServerTransport and service goroutine for each. The service goroutines -// read gRPC request and then call the registered handlers to reply to them. -// Service returns when lis.Accept fails. +// read gRPC requests and then call the registered handlers to reply to them. +// Service returns when lis.Accept fails. lis will be closed when +// this method returns. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") if s.lis == nil { s.mu.Unlock() + lis.Close() return ErrServerStopped } s.lis[lis] = true s.mu.Unlock() defer func() { - lis.Close() s.mu.Lock() - delete(s.lis, lis) + if s.lis != nil && s.lis[lis] { + lis.Close() + delete(s.lis, lis) + } s.mu.Unlock() }() for { - c, err := lis.Accept() + rawConn, err := lis.Accept() if err != nil { s.mu.Lock() s.printf("done serving; Accept = %v", err) s.mu.Unlock() return err } - var authInfo credentials.AuthInfo - if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok { - var conn net.Conn - conn, authInfo, err = creds.ServerHandshake(c) - if err != nil { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", c.RemoteAddr(), err) - s.mu.Unlock() - grpclog.Println("grpc: Server.Serve failed to complete security handshake.") - continue - } - c = conn - } + // Start a new goroutine to deal with rawConn + // so we don't stall this Accept loop goroutine. + go s.handleRawConn(rawConn) + } +} + +// handleRawConn is run in its own goroutine and handles a just-accepted +// connection that has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - c.Close() - return nil - } - st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) - if err != nil { - s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) - s.mu.Unlock() - c.Close() - grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) - continue - } - s.conns[st] = true + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + rawConn.Close() + return + } + + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + conn.Close() + return + } + s.mu.Unlock() + + if s.opts.useHandlerImpl { + s.serveUsingHandler(conn) + } else { + s.serveNewHTTP2Transport(conn, authInfo) + } +} + +// serveNewHTTP2Transport sets up a new http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go) and +// serves streams on it. +// This is run in its own goroutine (it does network I/O in +// transport.NewServerTransport). 
+func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { + st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() + c.Close() + grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) + return + } + if !s.addConn(st) { + st.Close() + return + } + s.serveStreams(st) +} +func (s *Server) serveStreams(st transport.ServerTransport) { + defer s.removeConn(st) + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) go func() { - var wg sync.WaitGroup - st.HandleStreams(func(stream *transport.Stream) { - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), - } - trInfo.firstLine.client = false - trInfo.firstLine.remoteAddr = st.RemoteAddr() - stream.TraceContext(trInfo.tr) - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = dl.Sub(time.Now()) - } - } - wg.Add(1) - go func() { - s.handleStream(st, stream, trInfo) - wg.Done() - }() - }) - wg.Wait() - s.mu.Lock() - delete(s.conns, st) - s.mu.Unlock() + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) }() + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// serveUsingHandler is called from handleRawConn when s is configured +// to handle requests via the http.Handler interface. It sets up a +// net/http.Server to handle the just-accepted conn. The http.Server +// is configured to route all incoming requests (all HTTP/2 streams) +// to ServeHTTP, which creates a new ServerTransport for each stream. +// serveUsingHandler blocks until conn closes. +// +// This codepath is only used when Server.TestingUseHandlerImpl has +// been configured. This lets the end2end tests exercise the ServeHTTP +// method as one of the environment types. +// +// conn is the *tls.Conn that's already been authenticated. +func (s *Server) serveUsingHandler(conn net.Conn) { + if !s.addConn(conn) { + conn.Close() + return + } + defer s.removeConn(conn) + h2s := &http2.Server{ + MaxConcurrentStreams: s.opts.maxConcurrentStreams, + } + h2s.ServeConn(conn, &http2.ServeConnOpts{ + Handler: s, + }) +} + +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + st.Close() + return } + defer s.removeConn(st) + s.serveStreams(st) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, pf payloadFormat, opts *transport.Options) error { - p, err := encode(s.opts.codec, msg, pf) +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. 
+func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + trInfo = &traceInfo{ + tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), + } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + stream.TraceContext(trInfo.tr) + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = dl.Sub(time.Now()) + } + return trInfo +} + +func (s *Server) addConn(c io.Closer) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + return false + } + s.conns[c] = true + return true +} + +func (s *Server) removeConn(c io.Closer) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, c) + } +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { + var cbuf *bytes.Buffer + if cp != nil { + cbuf = new(bytes.Buffer) + } + p, err := encode(s.opts.codec, msg, cp, cbuf) if err != nil { // This typically indicates a fatal issue (e.g., memory // corruption or hardware faults) the application program @@ -314,114 +514,162 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } }() } - p := &parser{s: stream} + if s.opts.cp != nil { + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + stream.SetSendCompress(s.opts.cp.Type()) + } + p := &parser{r: stream} for { pf, req, err := p.recvMsg() if err == io.EOF { // The entire stream is done (for unary RPC only). return err } + if err == io.ErrUnexpectedEOF { + err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"} + } if err != nil { switch err := err.(type) { case transport.ConnectionError: // Nothing to do here. 
case transport.StreamError: if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } default: panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) } return err } - switch pf { - case compressionNone: - statusCode := codes.OK - statusDesc := "" - df := func(v interface{}) error { - if err := s.opts.codec.Unmarshal(req, v); err != nil { - return err - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil - } - reply, appErr := md.Handler(srv.server, stream.Context(), df) - if appErr != nil { - if err, ok := appErr.(rpcError); ok { - statusCode = err.code - statusDesc = err.desc - } else { - statusCode = convertCode(appErr) - statusDesc = appErr.Error() + + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + switch err := err.(type) { + case transport.StreamError: + if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } - if trInfo != nil && statusCode != codes.OK { - trInfo.tr.LazyLog(stringer(statusDesc), true) - trInfo.tr.SetError() + default: + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } - if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + } + return err + } + statusCode := codes.OK + statusDesc := "" + df := func(v interface{}) error { + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } return err } - return nil + } + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return err } if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) } - opts := &transport.Options{ - Last: true, - Delay: false, + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) + if appErr != nil { + if err, ok := appErr.(*rpcError); ok { + statusCode = err.code + statusDesc = err.desc + } else { + statusCode = convertCode(appErr) + statusDesc = appErr.Error() } - if err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil { - switch err := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - statusCode = err.Code - statusDesc = err.Desc - default: - statusCode = codes.Unknown - statusDesc = err.Error() - } + if trInfo != nil && statusCode != codes.OK { + trInfo.tr.LazyLog(stringer(statusDesc), true) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) return err } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + return nil + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + case transport.StreamError: + statusCode = err.Code + statusDesc = err.Desc + default: + statusCode = codes.Unknown + statusDesc = err.Error() } - return t.WriteStatus(stream, statusCode, statusDesc) - default: - panic(fmt.Sprintf("payload format to be supported: %d", pf)) + return err } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + return t.WriteStatus(stream, statusCode, statusDesc) } } func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } ss := &serverStream{ t: t, s: stream, - p: &parser{s: stream}, + p: &parser{r: stream}, codec: s.opts.codec, + cp: s.opts.cp, + dc: s.opts.dc, trInfo: trInfo, } + if ss.cp != nil { + ss.cbuf = new(bytes.Buffer) + } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) defer func() { ss.mu.Lock() if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() } - trInfo.tr.Finish() - trInfo.tr = nil + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil ss.mu.Unlock() }() } - if appErr := sd.Handler(srv.server, ss); appErr != nil { - if err, ok := appErr.(rpcError); ok { + var appErr error + if s.opts.streamInt == nil { + appErr = sd.Handler(srv.server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler) + } + if appErr != nil { + if err, ok := appErr.(*rpcError); ok { ss.statusCode = err.code ss.statusDesc = err.desc + } else if err, ok := appErr.(transport.StreamError); ok { + ss.statusCode = err.Code + ss.statusDesc = err.Desc } else { ss.statusCode = convertCode(appErr) ss.statusDesc = appErr.Error() @@ -430,10 +678,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { ss.mu.Lock() if ss.statusCode != codes.OK { - trInfo.tr.LazyLog(stringer(ss.statusDesc), true) - trInfo.tr.SetError() + ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true) + ss.trInfo.tr.SetError() } else { - trInfo.tr.LazyLog(stringer("OK"), false) + ss.trInfo.tr.LazyLog(stringer("OK"), false) } ss.mu.Unlock() } @@ -448,18 +696,40 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } pos := strings.LastIndex(sm, "/") if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } if 
err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } + if trInfo != nil { + trInfo.tr.Finish() + } return } service := sm[:pos] method := sm[pos+1:] srv, ok := s.m[service] if !ok { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) + trInfo.tr.SetError() + } if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } + if trInfo != nil { + trInfo.tr.Finish() + } return } // Unary RPC or Streaming RPC? @@ -471,13 +741,27 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str s.processStreamingRPC(t, stream, srv, sd, trInfo) return } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) + trInfo.tr.SetError() + } if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } + if trInfo != nil { + trInfo.tr.Finish() + } } -// Stop stops the gRPC server. Once Stop returns, the server stops accepting -// connection requests and closes all the connected connections. +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. func (s *Server) Stop() { s.mu.Lock() listeners := s.lis @@ -485,12 +769,14 @@ func (s *Server) Stop() { cs := s.conns s.conns = nil s.mu.Unlock() + for lis := range listeners { lis.Close() } for c := range cs { c.Close() } + s.mu.Lock() if s.events != nil { s.events.Finish() @@ -499,14 +785,23 @@ func (s *Server) Stop() { s.mu.Unlock() } -// TestingCloseConns closes all exiting transports but keeps s.lis accepting new -// connections. This is for test only now. -func (s *Server) TestingCloseConns() { +func init() { + internal.TestingCloseConns = func(arg interface{}) { + arg.(*Server).testingCloseConns() + } + internal.TestingUseHandlerImpl = func(arg interface{}) { + arg.(*Server).opts.useHandlerImpl = true + } +} + +// testingCloseConns closes all existing transports but keeps s.lis +// accepting new connections. +func (s *Server) testingCloseConns() { s.mu.Lock() for c := range s.conns { c.Close() + delete(s.conns, c) } - s.conns = make(map[transport.ServerTransport]bool) s.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/server_test.go b/vendor/google.golang.org/grpc/server_test.go new file mode 100644 index 00000000000..eb598a516cc --- /dev/null +++ b/vendor/google.golang.org/grpc/server_test.go @@ -0,0 +1,113 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "net" + "reflect" + "strings" + "testing" +) + +type emptyServiceServer interface{} + +type testServer struct{} + +func TestStopBeforeServe(t *testing.T) { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to create listener: %v", err) + } + + server := NewServer() + server.Stop() + err = server.Serve(lis) + if err != ErrServerStopped { + t.Fatalf("server.Serve() error = %v, want %v", err, ErrServerStopped) + } + + // server.Serve is responsible for closing the listener, even if the + // server was already stopped. 
+ err = lis.Close() + if got, want := ErrorDesc(err), "use of closed network connection"; !strings.Contains(got, want) { + t.Errorf("Close() error = %q, want %q", got, want) + } +} + +func TestGetServiceInfo(t *testing.T) { + testSd := ServiceDesc{ + ServiceName: "grpc.testing.EmptyService", + HandlerType: (*emptyServiceServer)(nil), + Methods: []MethodDesc{ + { + MethodName: "EmptyCall", + Handler: nil, + }, + }, + Streams: []StreamDesc{ + { + StreamName: "EmptyStream", + Handler: nil, + ServerStreams: false, + ClientStreams: true, + }, + }, + Metadata: []int{0, 2, 1, 3}, + } + + server := NewServer() + server.RegisterService(&testSd, &testServer{}) + + info := server.GetServiceInfo() + want := map[string]*ServiceInfo{ + "grpc.testing.EmptyService": &ServiceInfo{ + Methods: []MethodInfo{ + MethodInfo{ + Name: "EmptyCall", + IsClientStream: false, + IsServerStream: false, + }, + MethodInfo{ + Name: "EmptyStream", + IsClientStream: true, + IsServerStream: false, + }}, + Metadata: []int{0, 2, 1, 3}, + }, + } + + if !reflect.DeepEqual(info, want) { + t.Errorf("GetServiceInfo() = %q, want %q", info, want) + } +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 2370dd0e9d5..4c3136c9933 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -34,6 +34,7 @@ package grpc import ( + "bytes" "errors" "io" "sync" @@ -46,12 +47,14 @@ import ( "google.golang.org/grpc/transport" ) -type streamHandler func(srv interface{}, stream ServerStream) error +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. type StreamDesc struct { StreamName string - Handler streamHandler + Handler StreamHandler // At least one of these is true. ServerStreams bool @@ -66,26 +69,24 @@ type Stream interface { // breaks. // On error, it aborts the stream and returns an RPC status on client // side. On server side, it simply returns the error to the caller. - // SendMsg is called by generated code. + // SendMsg is called by generated code. Also Users can call SendMsg + // directly when it is really needed in their use cases. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message or the stream is // done. On client side, it returns io.EOF when the stream is done. On - // any other error, it aborts the streama nd returns an RPC status. On + // any other error, it aborts the stream and returns an RPC status. On // server side, it simply returns the error to the caller. RecvMsg(m interface{}) error } -// ClientStream defines the interface a client stream has to satify. +// ClientStream defines the interface a client stream has to satisfy. type ClientStream interface { - // Header returns the header metedata received from the server if there + // Header returns the header metadata received from the server if there // is any. It blocks if the metadata is not ready to read. Header() (metadata.MD, error) - // Trailer returns the trailer metadata from the server. It must be called - // after stream.Recv() returns non-nil error (including io.EOF) for - // bi-directional streaming and server streaming or stream.CloseAndRecv() - // returns for client streaming in order to receive trailer metadata if - // present. Otherwise, it could returns an empty MD even though trailer - // is present. 
+ // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). Trailer() metadata.MD // CloseSend closes the send direction of the stream. It closes the stream // when non-nil error is met. @@ -98,22 +99,37 @@ type ClientStream interface { func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { var ( t transport.ClientTransport + s *transport.Stream err error + put func() ) - t, err = cc.dopts.picker.Pick(ctx) - if err != nil { - return nil, toRPCErr(err) + c := defaultCallInfo + for _, o := range opts { + if err := o.before(&c); err != nil { + return nil, toRPCErr(err) + } } - // TODO(zhaoq): CallOption is omitted. Add support when it is needed. callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, + Flush: desc.ServerStreams && desc.ClientStreams, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() } cs := &clientStream{ + opts: opts, + c: c, desc: desc, codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, tracing: EnableTracing, } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cs.cbuf = new(bytes.Buffer) + } if cs.tracing { cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) cs.trInfo.firstLine.client = true @@ -123,33 +139,88 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false) ctx = trace.NewContext(ctx, cs.trInfo.tr) } - s, err := t.NewStream(ctx, callHdr) - if err != nil { - return nil, toRPCErr(err) + gopts := BalancerGetOptions{ + BlockingWait: !c.failFast, + } + for { + t, put, err = cc.getTransport(ctx, gopts) + if err != nil { + // TODO(zhaoq): Probably revisit the error handling. + if _, ok := err.(*rpcError); ok { + return nil, err + } + if err == errConnClosing { + if c.failFast { + return nil, Errorf(codes.Unavailable, "%v", errConnClosing) + } + continue + } + // All the other errors are treated as Internal errors. + return nil, Errorf(codes.Internal, "%v", err) + } + + s, err = t.NewStream(ctx, callHdr) + if err != nil { + if put != nil { + put() + put = nil + } + if _, ok := err.(transport.ConnectionError); ok { + if c.failFast { + cs.finish(err) + return nil, toRPCErr(err) + } + continue + } + return nil, toRPCErr(err) + } + break } + cs.put = put cs.t = t cs.s = s - cs.p = &parser{s: s} - // Listen on ctx.Done() to detect cancellation when there is no pending - // I/O operations on this stream. + cs.p = &parser{r: s} + // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination + // when there is no pending I/O operations on this stream. go func() { - <-s.Context().Done() - cs.closeTransportStream(transport.ContextErr(s.Context().Err())) + select { + case <-t.Error(): + // Incur transport error, simply exit. + case <-s.Done(): + // TODO: The trace of the RPC is terminated here when there is no pending + // I/O, which is probably not the optimal solution. + if s.StatusCode() == codes.OK { + cs.finish(nil) + } else { + cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc())) + } + cs.closeTransportStream(nil) + case <-s.Context().Done(): + err := s.Context().Err() + cs.finish(err) + cs.closeTransportStream(transport.ContextErr(err)) + } }() return cs, nil } // clientStream implements a client side Stream. 
type clientStream struct { + opts []CallOption + c callInfo t transport.ClientTransport s *transport.Stream p *parser desc *StreamDesc codec Codec + cp Compressor + cbuf *bytes.Buffer + dc Decompressor tracing bool // set to EnableTracing when the clientStream is created. - mu sync.Mutex + mu sync.Mutex + put func() closed bool // trInfo.tr is set when the clientStream is created (if EnableTracing is true), // and is set to nil when the clientStream's finish method is called. @@ -183,7 +254,20 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { cs.mu.Unlock() } defer func() { - if err == nil || err == io.EOF { + if err != nil { + cs.finish(err) + } + if err == nil { + return + } + if err == io.EOF { + // Specialize the process for server streaming. SendMesg is only called + // once when creating the stream object. io.EOF needs to be skipped when + // the rpc is early finished (before the stream object is created.). + // TODO: It is probably better to move this into the generated code. + if !cs.desc.ClientStreams && cs.desc.ServerStreams { + err = nil + } return } if _, ok := err.(transport.ConnectionError); !ok { @@ -191,7 +275,12 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } err = toRPCErr(err) }() - out, err := encode(cs.codec, m, compressionNone) + out, err := encode(cs.codec, m, cs.cp, cs.cbuf) + defer func() { + if cs.cbuf != nil { + cs.cbuf.Reset() + } + }() if err != nil { return transport.StreamErrorf(codes.Internal, "grpc: %v", err) } @@ -199,7 +288,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } func (cs *clientStream) RecvMsg(m interface{}) (err error) { - err = recv(cs.p, cs.codec, m) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m) defer func() { // err != nil indicates the termination of the stream. if err != nil { @@ -218,16 +307,17 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { return } // Special handling for client streaming rpc. - err = recv(cs.p, cs.codec, m) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m) cs.closeTransportStream(err) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } if err == io.EOF { if cs.s.StatusCode() == codes.OK { + cs.finish(err) return nil } - return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) } return toRPCErr(err) } @@ -239,15 +329,20 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { // Returns io.EOF to indicate the end of the stream. 
return } - return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) } return toRPCErr(err) } func (cs *clientStream) CloseSend() (err error) { err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) + defer func() { + if err != nil { + cs.finish(err) + } + }() if err == nil || err == io.EOF { - return + return nil } if _, ok := err.(transport.ConnectionError); !ok { cs.closeTransportStream(err) @@ -268,11 +363,18 @@ func (cs *clientStream) closeTransportStream(err error) { } func (cs *clientStream) finish(err error) { + cs.mu.Lock() + defer cs.mu.Unlock() + for _, o := range cs.opts { + o.after(&cs.c) + } + if cs.put != nil { + cs.put() + cs.put = nil + } if !cs.tracing { return } - cs.mu.Lock() - defer cs.mu.Unlock() if cs.trInfo.tr != nil { if err == nil || err == io.EOF { cs.trInfo.tr.LazyPrintf("RPC: [OK]") @@ -303,6 +405,9 @@ type serverStream struct { s *transport.Stream p *parser codec Codec + cp Compressor + dc Decompressor + cbuf *bytes.Buffer statusCode codes.Code statusDesc string trInfo *traceInfo @@ -341,7 +446,12 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { ss.mu.Unlock() } }() - out, err := encode(ss.codec, m, compressionNone) + out, err := encode(ss.codec, m, ss.cp, ss.cbuf) + defer func() { + if ss.cbuf != nil { + ss.cbuf.Reset() + } + }() if err != nil { err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) return err @@ -364,5 +474,5 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { ss.mu.Unlock() } }() - return recv(ss.p, ss.codec, m) + return recv(ss.p, ss.codec, ss.s, ss.dc, m) } diff --git a/vendor/google.golang.org/grpc/stress/client/main.go b/vendor/google.golang.org/grpc/stress/client/main.go new file mode 100644 index 00000000000..4579aab463a --- /dev/null +++ b/vendor/google.golang.org/grpc/stress/client/main.go @@ -0,0 +1,298 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +// client starts an interop client to do stress test and a metrics server to report qps. +package main + +import ( + "flag" + "fmt" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/interop" + testpb "google.golang.org/grpc/interop/grpc_testing" + metricspb "google.golang.org/grpc/stress/grpc_testing" +) + +var ( + serverAddresses = flag.String("server_addresses", "localhost:8080", "a list of server addresses") + testCases = flag.String("test_cases", "", "a list of test cases along with the relative weights") + testDurationSecs = flag.Int("test_duration_secs", -1, "test duration in seconds") + numChannelsPerServer = flag.Int("num_channels_per_server", 1, "Number of channels (i.e connections) to each server") + numStubsPerChannel = flag.Int("num_stubs_per_channel", 1, "Number of client stubs per each connection to server") + metricsPort = flag.Int("metrics_port", 8081, "The port at which the stress client exposes QPS metrics") +) + +// testCaseWithWeight contains the test case type and its weight. +type testCaseWithWeight struct { + name string + weight int +} + +// parseTestCases converts test case string to a list of struct testCaseWithWeight. +func parseTestCases(testCaseString string) []testCaseWithWeight { + testCaseStrings := strings.Split(testCaseString, ",") + testCases := make([]testCaseWithWeight, len(testCaseStrings)) + for i, str := range testCaseStrings { + testCase := strings.Split(str, ":") + if len(testCase) != 2 { + panic(fmt.Sprintf("invalid test case with weight: %s", str)) + } + // Check if test case is supported. + switch testCase[0] { + case + "empty_unary", + "large_unary", + "client_streaming", + "server_streaming", + "empty_stream": + default: + panic(fmt.Sprintf("unknown test type: %s", testCase[0])) + } + testCases[i].name = testCase[0] + w, err := strconv.Atoi(testCase[1]) + if err != nil { + panic(fmt.Sprintf("%v", err)) + } + testCases[i].weight = w + } + return testCases +} + +// weightedRandomTestSelector defines a weighted random selector for test case types. +type weightedRandomTestSelector struct { + tests []testCaseWithWeight + totalWeight int +} + +// newWeightedRandomTestSelector constructs a weightedRandomTestSelector with the given list of testCaseWithWeight. +func newWeightedRandomTestSelector(tests []testCaseWithWeight) *weightedRandomTestSelector { + var totalWeight int + for _, t := range tests { + totalWeight += t.weight + } + rand.Seed(time.Now().UnixNano()) + return &weightedRandomTestSelector{tests, totalWeight} +} + +func (selector weightedRandomTestSelector) getNextTest() string { + random := rand.Intn(selector.totalWeight) + var weightSofar int + for _, test := range selector.tests { + weightSofar += test.weight + if random < weightSofar { + return test.name + } + } + panic("no test case selected by weightedRandomTestSelector") +} + +// gauge stores the qps of one interop client (one stub). +type gauge struct { + mutex sync.RWMutex + val int64 +} + +func (g *gauge) set(v int64) { + g.mutex.Lock() + defer g.mutex.Unlock() + g.val = v +} + +func (g *gauge) get() int64 { + g.mutex.RLock() + defer g.mutex.RUnlock() + return g.val +} + +// server implements metrics server functions. +type server struct { + mutex sync.RWMutex + // gauges is a map from /stress_test/server_/channel_/stub_/qps to its qps gauge. 
+ gauges map[string]*gauge +} + +// newMetricsServer returns a new metrics server. +func newMetricsServer() *server { + return &server{gauges: make(map[string]*gauge)} +} + +// GetAllGauges returns all gauges. +func (s *server) GetAllGauges(in *metricspb.EmptyMessage, stream metricspb.MetricsService_GetAllGaugesServer) error { + s.mutex.RLock() + defer s.mutex.RUnlock() + + for name, gauge := range s.gauges { + if err := stream.Send(&metricspb.GaugeResponse{Name: name, Value: &metricspb.GaugeResponse_LongValue{LongValue: gauge.get()}}); err != nil { + return err + } + } + return nil +} + +// GetGauge returns the gauge for the given name. +func (s *server) GetGauge(ctx context.Context, in *metricspb.GaugeRequest) (*metricspb.GaugeResponse, error) { + s.mutex.RLock() + defer s.mutex.RUnlock() + + if g, ok := s.gauges[in.Name]; ok { + return &metricspb.GaugeResponse{Name: in.Name, Value: &metricspb.GaugeResponse_LongValue{LongValue: g.get()}}, nil + } + return nil, grpc.Errorf(codes.InvalidArgument, "gauge with name %s not found", in.Name) +} + +// createGauge creates a guage using the given name in metrics server. +func (s *server) createGauge(name string) *gauge { + s.mutex.Lock() + defer s.mutex.Unlock() + + if _, ok := s.gauges[name]; ok { + // gauge already exists. + panic(fmt.Sprintf("gauge %s already exists", name)) + } + var g gauge + s.gauges[name] = &g + return &g +} + +func startServer(server *server, port int) { + lis, err := net.Listen("tcp", ":"+strconv.Itoa(port)) + if err != nil { + grpclog.Fatalf("failed to listen: %v", err) + } + + s := grpc.NewServer() + metricspb.RegisterMetricsServiceServer(s, server) + s.Serve(lis) + +} + +// performRPCs uses weightedRandomTestSelector to select test case and runs the tests. +func performRPCs(gauge *gauge, conn *grpc.ClientConn, selector *weightedRandomTestSelector, stop <-chan bool) { + client := testpb.NewTestServiceClient(conn) + var numCalls int64 + startTime := time.Now() + for { + done := make(chan bool, 1) + go func() { + test := selector.getNextTest() + switch test { + case "empty_unary": + interop.DoEmptyUnaryCall(client) + case "large_unary": + interop.DoLargeUnaryCall(client) + case "client_streaming": + interop.DoClientStreaming(client) + case "server_streaming": + interop.DoServerStreaming(client) + case "empty_stream": + interop.DoEmptyStream(client) + } + done <- true + }() + select { + case <-stop: + return + case <-done: + numCalls++ + gauge.set(int64(float64(numCalls) / time.Since(startTime).Seconds())) + } + } +} + +func logParameterInfo(addresses []string, tests []testCaseWithWeight) { + grpclog.Printf("server_addresses: %s", *serverAddresses) + grpclog.Printf("test_cases: %s", *testCases) + grpclog.Printf("test_duration-secs: %d", *testDurationSecs) + grpclog.Printf("num_channels_per_server: %d", *numChannelsPerServer) + grpclog.Printf("num_stubs_per_channel: %d", *numStubsPerChannel) + grpclog.Printf("metrics_port: %d", *metricsPort) + + grpclog.Println("addresses:") + for i, addr := range addresses { + grpclog.Printf("%d. %s\n", i+1, addr) + } + grpclog.Println("tests:") + for i, test := range tests { + grpclog.Printf("%d. 
%v\n", i+1, test) + } +} + +func main() { + flag.Parse() + addresses := strings.Split(*serverAddresses, ",") + tests := parseTestCases(*testCases) + logParameterInfo(addresses, tests) + testSelector := newWeightedRandomTestSelector(tests) + metricsServer := newMetricsServer() + + var wg sync.WaitGroup + wg.Add(len(addresses) * *numChannelsPerServer * *numStubsPerChannel) + stop := make(chan bool) + + for serverIndex, address := range addresses { + for connIndex := 0; connIndex < *numChannelsPerServer; connIndex++ { + conn, err := grpc.Dial(address, grpc.WithInsecure()) + if err != nil { + grpclog.Fatalf("Fail to dial: %v", err) + } + defer conn.Close() + for clientIndex := 0; clientIndex < *numStubsPerChannel; clientIndex++ { + name := fmt.Sprintf("/stress_test/server_%d/channel_%d/stub_%d/qps", serverIndex+1, connIndex+1, clientIndex+1) + go func() { + defer wg.Done() + g := metricsServer.createGauge(name) + performRPCs(g, conn, testSelector, stop) + }() + } + + } + } + go startServer(metricsServer, *metricsPort) + if *testDurationSecs > 0 { + time.Sleep(time.Duration(*testDurationSecs) * time.Second) + close(stop) + } + wg.Wait() + grpclog.Printf(" ===== ALL DONE ===== ") + +} diff --git a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go new file mode 100644 index 00000000000..4ad4ccdb295 --- /dev/null +++ b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go @@ -0,0 +1,361 @@ +// Code generated by protoc-gen-go. +// source: metrics.proto +// DO NOT EDIT! + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + metrics.proto + +It has these top-level messages: + GaugeResponse + GaugeRequest + EmptyMessage +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Reponse message containing the gauge name and value +type GaugeResponse struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Types that are valid to be assigned to Value: + // *GaugeResponse_LongValue + // *GaugeResponse_DoubleValue + // *GaugeResponse_StringValue + Value isGaugeResponse_Value `protobuf_oneof:"value"` +} + +func (m *GaugeResponse) Reset() { *m = GaugeResponse{} } +func (m *GaugeResponse) String() string { return proto.CompactTextString(m) } +func (*GaugeResponse) ProtoMessage() {} +func (*GaugeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isGaugeResponse_Value interface { + isGaugeResponse_Value() +} + +type GaugeResponse_LongValue struct { + LongValue int64 `protobuf:"varint,2,opt,name=long_value,json=longValue,oneof"` +} +type GaugeResponse_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,oneof"` +} +type GaugeResponse_StringValue struct { + StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,oneof"` +} + +func (*GaugeResponse_LongValue) isGaugeResponse_Value() {} +func (*GaugeResponse_DoubleValue) isGaugeResponse_Value() {} +func (*GaugeResponse_StringValue) isGaugeResponse_Value() {} + +func (m *GaugeResponse) GetValue() isGaugeResponse_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *GaugeResponse) GetLongValue() int64 { + if x, ok := m.GetValue().(*GaugeResponse_LongValue); ok { + return x.LongValue + } + return 0 +} + +func (m *GaugeResponse) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*GaugeResponse_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *GaugeResponse) GetStringValue() string { + if x, ok := m.GetValue().(*GaugeResponse_StringValue); ok { + return x.StringValue + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*GaugeResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GaugeResponse_OneofMarshaler, _GaugeResponse_OneofUnmarshaler, _GaugeResponse_OneofSizer, []interface{}{ + (*GaugeResponse_LongValue)(nil), + (*GaugeResponse_DoubleValue)(nil), + (*GaugeResponse_StringValue)(nil), + } +} + +func _GaugeResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GaugeResponse) + // value + switch x := m.Value.(type) { + case *GaugeResponse_LongValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.LongValue)) + case *GaugeResponse_DoubleValue: + b.EncodeVarint(3<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case *GaugeResponse_StringValue: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case nil: + default: + return fmt.Errorf("GaugeResponse.Value has unexpected type %T", x) + } + return nil +} + +func _GaugeResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GaugeResponse) + switch tag { + case 2: // value.long_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &GaugeResponse_LongValue{int64(x)} + return true, err + case 3: // value.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &GaugeResponse_DoubleValue{math.Float64frombits(x)} + return true, err + case 4: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &GaugeResponse_StringValue{x} + return true, err + default: + return false, nil + } +} + +func _GaugeResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GaugeResponse) + // value + switch x := m.Value.(type) { + case *GaugeResponse_LongValue: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.LongValue)) + case *GaugeResponse_DoubleValue: + n += proto.SizeVarint(3<<3 | proto.WireFixed64) + n += 8 + case *GaugeResponse_StringValue: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Request message containing the gauge name +type GaugeRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GaugeRequest) Reset() { *m = GaugeRequest{} } +func (m *GaugeRequest) String() string { return proto.CompactTextString(m) } +func (*GaugeRequest) ProtoMessage() {} +func (*GaugeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type EmptyMessage struct { +} + +func (m *EmptyMessage) Reset() { *m = EmptyMessage{} } +func (m *EmptyMessage) String() string { return proto.CompactTextString(m) } +func (*EmptyMessage) ProtoMessage() {} +func (*EmptyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func init() { + proto.RegisterType((*GaugeResponse)(nil), "grpc.testing.GaugeResponse") + proto.RegisterType((*GaugeRequest)(nil), "grpc.testing.GaugeRequest") + proto.RegisterType((*EmptyMessage)(nil), "grpc.testing.EmptyMessage") +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for MetricsService service + +type MetricsServiceClient interface { + // Returns the values of all the gauges that are currently being maintained by + // the service + GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) + // Returns the value of one gauge + GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) +} + +type metricsServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_MetricsService_serviceDesc.Streams[0], c.cc, "/grpc.testing.MetricsService/GetAllGauges", opts...) + if err != nil { + return nil, err + } + x := &metricsServiceGetAllGaugesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type MetricsService_GetAllGaugesClient interface { + Recv() (*GaugeResponse, error) + grpc.ClientStream +} + +type metricsServiceGetAllGaugesClient struct { + grpc.ClientStream +} + +func (x *metricsServiceGetAllGaugesClient) Recv() (*GaugeResponse, error) { + m := new(GaugeResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *metricsServiceClient) GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) { + out := new(GaugeResponse) + err := grpc.Invoke(ctx, "/grpc.testing.MetricsService/GetGauge", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for MetricsService service + +type MetricsServiceServer interface { + // Returns the values of all the gauges that are currently being maintained by + // the service + GetAllGauges(*EmptyMessage, MetricsService_GetAllGaugesServer) error + // Returns the value of one gauge + GetGauge(context.Context, *GaugeRequest) (*GaugeResponse, error) +} + +func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { + s.RegisterService(&_MetricsService_serviceDesc, srv) +} + +func _MetricsService_GetAllGauges_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(EmptyMessage) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(MetricsServiceServer).GetAllGauges(m, &metricsServiceGetAllGaugesServer{stream}) +} + +type MetricsService_GetAllGaugesServer interface { + Send(*GaugeResponse) error + grpc.ServerStream +} + +type metricsServiceGetAllGaugesServer struct { + grpc.ServerStream +} + +func (x *metricsServiceGetAllGaugesServer) Send(m *GaugeResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _MetricsService_GetGauge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GaugeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceServer).GetGauge(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.MetricsService/GetGauge", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceServer).GetGauge(ctx, req.(*GaugeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetGauge", + Handler: _MetricsService_GetGauge_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetAllGauges", + Handler: _MetricsService_GetAllGauges_Handler, + ServerStreams: true, + }, + }, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 253 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x4d, 0x2d, 0x29, + 0xca, 0x4c, 0x2e, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, 0xd6, + 0x2b, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x57, 0x9a, 0xce, 0xc8, 0xc5, 0xeb, 0x9e, 0x58, 0x9a, + 0x9e, 0x1a, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x24, 0xc4, 0xc5, 0x92, 0x97, 0x98, + 0x9b, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x66, 0x0b, 0xc9, 0x73, 0x71, 0xe5, 0xe4, + 0xe7, 0xa5, 0xc7, 0x97, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x01, 0x65, 0x98, 0x3d, 0x18, 0x82, + 0x38, 0x41, 0x62, 0x61, 0x20, 0x21, 0x21, 0x65, 0x2e, 0x9e, 0x94, 0xfc, 0xd2, 0xa4, 0x9c, 0x54, + 0xa8, 0x12, 0x66, 0xa0, 0x12, 0x46, 0xa0, 0x12, 0x6e, 0x88, 0x28, 0x5c, 0x51, 0x31, 0xd0, 0x25, + 0x70, 0x73, 0x58, 0x40, 0x36, 0x80, 0x14, 0x41, 0x44, 0xc1, 0x8a, 0x9c, 0xd8, 0xb9, 0x58, 0xc1, + 0xb2, 0x4a, 0x4a, 0x5c, 0x3c, 0x50, 0x87, 0x15, 0x96, 0x02, 0x1d, 0x8b, 0xcd, 0x5d, 0x4a, 0x7c, + 0x5c, 0x3c, 0xae, 0xb9, 0x05, 0x25, 0x95, 0xbe, 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0xa9, 0x46, 0x0b, + 0x18, 0xb9, 0xf8, 0x7c, 0x21, 0xbe, 0x0d, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x15, 0xf2, 0x04, + 
0x1a, 0x93, 0x5a, 0xe2, 0x98, 0x93, 0x03, 0x36, 0xac, 0x58, 0x48, 0x4a, 0x0f, 0xd9, 0xff, 0x7a, + 0xc8, 0xda, 0xa5, 0xa4, 0x51, 0xe5, 0x50, 0xc2, 0xc5, 0x80, 0x51, 0xc8, 0x99, 0x8b, 0x03, 0x68, + 0x14, 0x58, 0x14, 0xdd, 0x18, 0x64, 0x97, 0xe2, 0x35, 0x26, 0x89, 0x0d, 0x1c, 0x0b, 0xc6, 0x80, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x7d, 0xb2, 0xc9, 0x96, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto new file mode 100644 index 00000000000..1202b20b8fd --- /dev/null +++ b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto @@ -0,0 +1,64 @@ +// Copyright 2015-2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Contains the definitions for a metrics service and the type of metrics +// exposed by the service. +// +// Currently, 'Gauge' (i.e a metric that represents the measured value of +// something at an instant of time) is the only metric type supported by the +// service. +syntax = "proto3"; + +package grpc.testing; + +// Reponse message containing the gauge name and value +message GaugeResponse { + string name = 1; + oneof value { + int64 long_value = 2; + double double_value = 3; + string string_value = 4; + } +} + +// Request message containing the gauge name +message GaugeRequest { + string name = 1; +} + +message EmptyMessage {} + +service MetricsService { + // Returns the values of all the gauges that are currently being maintained by + // the service + rpc GetAllGauges(EmptyMessage) returns (stream GaugeResponse); + + // Returns the value of one gauge + rpc GetGauge(GaugeRequest) returns (GaugeResponse); +} diff --git a/vendor/google.golang.org/grpc/stress/metrics_client/main.go b/vendor/google.golang.org/grpc/stress/metrics_client/main.go new file mode 100644 index 00000000000..983a8ff24cc --- /dev/null +++ b/vendor/google.golang.org/grpc/stress/metrics_client/main.go @@ -0,0 +1,97 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "flag" + "fmt" + "io" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + metricspb "google.golang.org/grpc/stress/grpc_testing" +) + +var ( + metricsServerAddress = flag.String("metrics_server_address", "", "The metrics server addresses in the fomrat :") + totalOnly = flag.Bool("total_only", false, "If true, this prints only the total value of all gauges") +) + +func printMetrics(client metricspb.MetricsServiceClient, totalOnly bool) { + stream, err := client.GetAllGauges(context.Background(), &metricspb.EmptyMessage{}) + if err != nil { + grpclog.Fatalf("failed to call GetAllGuages: %v", err) + } + + var ( + overallQPS int64 + rpcStatus error + ) + for { + gaugeResponse, err := stream.Recv() + if err != nil { + rpcStatus = err + break + } + if _, ok := gaugeResponse.GetValue().(*metricspb.GaugeResponse_LongValue); !ok { + panic(fmt.Sprintf("gauge %s is not a long value", gaugeResponse.Name)) + } + v := gaugeResponse.GetLongValue() + if !totalOnly { + grpclog.Printf("%s: %d", gaugeResponse.Name, v) + } + overallQPS += v + } + if rpcStatus != io.EOF { + grpclog.Fatalf("failed to finish server streaming: %v", rpcStatus) + } + grpclog.Printf("overall qps: %d", overallQPS) +} + +func main() { + flag.Parse() + if *metricsServerAddress == "" { + grpclog.Fatalf("Metrics server address is empty.") + } + + conn, err := grpc.Dial(*metricsServerAddress, grpc.WithInsecure()) + if err != nil { + grpclog.Fatalf("cannot connect to metrics server: %v", err) + } + defer conn.Close() + + c := metricspb.NewMetricsServiceClient(conn) + printMetrics(c, *totalOnly) +} diff --git a/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go b/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go new file mode 100644 index 00000000000..c88756edcc2 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go @@ -0,0 +1,63 @@ +// Code generated by protoc-gen-go. +// source: perf.proto +// DO NOT EDIT! 
+ +/* +Package codec_perf is a generated protocol buffer package. + +It is generated from these files: + perf.proto + +It has these top-level messages: + Buffer +*/ +package codec_perf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Buffer is a message that contains a body of bytes that is used to exercise +// encoding and decoding overheads. +type Buffer struct { + Body []byte `protobuf:"bytes,1,opt,name=body" json:"body,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Buffer) Reset() { *m = Buffer{} } +func (m *Buffer) String() string { return proto.CompactTextString(m) } +func (*Buffer) ProtoMessage() {} +func (*Buffer) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Buffer) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +func init() { + proto.RegisterType((*Buffer)(nil), "codec.perf.Buffer") +} + +func init() { proto.RegisterFile("perf.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 73 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x48, 0x2d, 0x4a, + 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4a, 0xce, 0x4f, 0x49, 0x4d, 0xd6, 0x03, 0x89, + 0x28, 0xc9, 0x70, 0xb1, 0x39, 0x95, 0xa6, 0xa5, 0xa5, 0x16, 0x09, 0x09, 0x71, 0xb1, 0x24, 0xe5, + 0xa7, 0x54, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x80, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x3a, 0x58, 0x92, 0x53, 0x36, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/test/codec_perf/perf.proto b/vendor/google.golang.org/grpc/test/codec_perf/perf.proto new file mode 100644 index 00000000000..4cf561f7f1e --- /dev/null +++ b/vendor/google.golang.org/grpc/test/codec_perf/perf.proto @@ -0,0 +1,11 @@ +// Messages used for performance tests that may not reference grpc directly for +// reasons of import cycles. +syntax = "proto2"; + +package codec.perf; + +// Buffer is a message that contains a body of bytes that is used to exercise +// encoding and decoding overheads. +message Buffer { + optional bytes body = 1; +} diff --git a/vendor/google.golang.org/grpc/test/end2end_test.go b/vendor/google.golang.org/grpc/test/end2end_test.go new file mode 100644 index 00000000000..fdac5815d58 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/end2end_test.go @@ -0,0 +1,2172 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc_test + +import ( + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "log" + "math" + "net" + "os" + "reflect" + "runtime" + "sort" + "strings" + "sync" + "syscall" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/health" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/transport" +) + +var ( + // For headers: + testMetadata = metadata.MD{ + "key1": []string{"value1"}, + "key2": []string{"value2"}, + } + // For trailers: + testTrailerMetadata = metadata.MD{ + "tkey1": []string{"trailerValue1"}, + "tkey2": []string{"trailerValue2"}, + } + // capital "Key" is illegal in HTTP/2. + malformedHTTP2Metadata = metadata.MD{ + "Key": []string{"foo"}, + } + testAppUA = "myApp1/1.0 myApp2/0.9" +) + +var raceMode bool // set by race_test.go in race mode + +type testServer struct { + security string // indicate the authentication protocol used by this server. + earlyFail bool // whether to error out the execution of a service handler prematurely. +} + +func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if md, ok := metadata.FromContext(ctx); ok { + // For testing purpose, returns an error if there is attached metadata other than + // the user agent set by the client application. 
+ if _, ok := md["user-agent"]; !ok { + return nil, grpc.Errorf(codes.DataLoss, "missing expected user-agent") + } + var str []string + for _, entry := range md["user-agent"] { + str = append(str, "ua", entry) + } + grpc.SendHeader(ctx, metadata.Pairs(str...)) + } + return new(testpb.Empty), nil +} + +func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) { + if size < 0 { + return nil, fmt.Errorf("Requested a response with invalid length %d", size) + } + body := make([]byte, size) + switch t { + case testpb.PayloadType_COMPRESSABLE: + case testpb.PayloadType_UNCOMPRESSABLE: + return nil, fmt.Errorf("PayloadType UNCOMPRESSABLE is not supported") + default: + return nil, fmt.Errorf("Unsupported payload type: %d", t) + } + return &testpb.Payload{ + Type: t.Enum(), + Body: body, + }, nil +} + +func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + md, ok := metadata.FromContext(ctx) + if ok { + if _, exists := md[":authority"]; !exists { + return nil, grpc.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md) + } + if err := grpc.SendHeader(ctx, md); err != nil { + return nil, fmt.Errorf("grpc.SendHeader(_, %v) = %v, want %v", md, err, nil) + } + grpc.SetTrailer(ctx, testTrailerMetadata) + } + pr, ok := peer.FromContext(ctx) + if !ok { + return nil, fmt.Errorf("failed to get peer from ctx") + } + if pr.Addr == net.Addr(nil) { + return nil, fmt.Errorf("failed to get peer address") + } + if s.security != "" { + // Check Auth info + var authType, serverName string + switch info := pr.AuthInfo.(type) { + case credentials.TLSInfo: + authType = info.AuthType() + serverName = info.State.ServerName + default: + return nil, fmt.Errorf("Unknown AuthInfo type") + } + if authType != s.security { + return nil, fmt.Errorf("Wrong auth type: got %q, want %q", authType, s.security) + } + if serverName != "x.test.youtube.com" { + return nil, fmt.Errorf("Unknown server name %q", serverName) + } + } + // Simulate some service delay. + time.Sleep(time.Second) + + payload, err := newPayload(in.GetResponseType(), in.GetResponseSize()) + if err != nil { + return nil, err + } + + return &testpb.SimpleResponse{ + Payload: payload, + }, nil +} + +func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { + if md, ok := metadata.FromContext(stream.Context()); ok { + if _, exists := md[":authority"]; !exists { + return grpc.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md) + } + // For testing purpose, returns an error if there is attached metadata except for authority. 
+ if len(md) > 1 { + return grpc.Errorf(codes.DataLoss, "got extra metadata") + } + } + cs := args.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + + payload, err := newPayload(args.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: payload, + }); err != nil { + return err + } + } + return nil +} + +func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { + var sum int + for { + in, err := stream.Recv() + if err == io.EOF { + return stream.SendAndClose(&testpb.StreamingInputCallResponse{ + AggregatedPayloadSize: proto.Int32(int32(sum)), + }) + } + if err != nil { + return err + } + p := in.GetPayload().GetBody() + sum += len(p) + if s.earlyFail { + return grpc.Errorf(codes.NotFound, "not found") + } + } +} + +func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { + md, ok := metadata.FromContext(stream.Context()) + if ok { + if err := stream.SendHeader(md); err != nil { + return fmt.Errorf("%v.SendHeader(%v) = %v, want %v", stream, md, err, nil) + } + stream.SetTrailer(md) + } + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + return nil + } + if err != nil { + return err + } + cs := in.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + + payload, err := newPayload(in.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: payload, + }); err != nil { + return err + } + } + } +} + +func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { + var msgBuf []*testpb.StreamingOutputCallRequest + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + break + } + if err != nil { + return err + } + msgBuf = append(msgBuf, in) + } + for _, m := range msgBuf { + cs := m.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + + payload, err := newPayload(m.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: payload, + }); err != nil { + return err + } + } + } + return nil +} + +const tlsDir = "testdata/" + +func unixDialer(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) +} + +type env struct { + name string + network string // The type of network such as tcp, unix, etc. + dialer func(addr string, timeout time.Duration) (net.Conn, error) + security string // The security protocol such as TLS, SSH, etc. 
+ httpHandler bool // whether to use the http.Handler ServerTransport; requires TLS +} + +func (e env) runnable() bool { + if runtime.GOOS == "windows" && strings.HasPrefix(e.name, "unix-") { + return false + } + return true +} + +func (e env) getDialer() func(addr string, timeout time.Duration) (net.Conn, error) { + if e.dialer != nil { + return e.dialer + } + return func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", addr, timeout) + } +} + +var ( + tcpClearEnv = env{name: "tcp-clear", network: "tcp"} + tcpTLSEnv = env{name: "tcp-tls", network: "tcp", security: "tls"} + unixClearEnv = env{name: "unix-clear", network: "unix", dialer: unixDialer} + unixTLSEnv = env{name: "unix-tls", network: "unix", dialer: unixDialer, security: "tls"} + handlerEnv = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true} + allEnv = []env{tcpClearEnv, tcpTLSEnv, unixClearEnv, unixTLSEnv, handlerEnv} +) + +var onlyEnv = flag.String("only_env", "", "If non-empty, one of 'tcp-clear', 'tcp-tls', 'unix-clear', 'unix-tls', or 'handler-tls' to only run the tests for that environment. Empty means all.") + +func listTestEnv() (envs []env) { + if *onlyEnv != "" { + for _, e := range allEnv { + if e.name == *onlyEnv { + if !e.runnable() { + panic(fmt.Sprintf("--only_env environment %q does not run on %s", *onlyEnv, runtime.GOOS)) + } + return []env{e} + } + } + panic(fmt.Sprintf("invalid --only_env value %q", *onlyEnv)) + } + for _, e := range allEnv { + if e.runnable() { + envs = append(envs, e) + } + } + return envs +} + +// test is an end-to-end test. It should be created with the newTest +// func, modified as needed, and then started with its startServer method. +// It should be cleaned up with the tearDown method. +type test struct { + t *testing.T + e env + + ctx context.Context // valid for life of test, before tearDown + cancel context.CancelFunc + + // Configurable knobs, after newTest returns: + testServer testpb.TestServiceServer // nil means none + healthServer *health.HealthServer // nil means disabled + maxStream uint32 + userAgent string + clientCompression bool + serverCompression bool + unaryInt grpc.UnaryServerInterceptor + streamInt grpc.StreamServerInterceptor + + // srv and srvAddr are set once startServer is called. + srv *grpc.Server + srvAddr string + + cc *grpc.ClientConn // nil until requested via clientConn + restoreLogs func() // nil unless declareLogNoise is used +} + +func (te *test) tearDown() { + if te.cancel != nil { + te.cancel() + te.cancel = nil + } + if te.cc != nil { + te.cc.Close() + te.cc = nil + } + if te.restoreLogs != nil { + te.restoreLogs() + te.restoreLogs = nil + } + te.srv.Stop() +} + +// newTest returns a new test using the provided testing.T and +// environment. It is returned with default values. Tests should +// modify it before calling its startServer and clientConn methods. +func newTest(t *testing.T, e env) *test { + te := &test{ + t: t, + e: e, + maxStream: math.MaxUint32, + } + te.ctx, te.cancel = context.WithCancel(context.Background()) + return te +} + +// startServer starts a gRPC server listening. Callers should defer a +// call to te.tearDown to clean up. 
+func (te *test) startServer(ts testpb.TestServiceServer) { + te.testServer = ts + e := te.e + te.t.Logf("Running test in %s environment...", e.name) + sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)} + if te.serverCompression { + sopts = append(sopts, + grpc.RPCCompressor(grpc.NewGZIPCompressor()), + grpc.RPCDecompressor(grpc.NewGZIPDecompressor()), + ) + } + if te.unaryInt != nil { + sopts = append(sopts, grpc.UnaryInterceptor(te.unaryInt)) + } + if te.streamInt != nil { + sopts = append(sopts, grpc.StreamInterceptor(te.streamInt)) + } + la := "localhost:0" + switch e.network { + case "unix": + la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now()) + syscall.Unlink(la) + } + lis, err := net.Listen(e.network, la) + if err != nil { + te.t.Fatalf("Failed to listen: %v", err) + } + if te.e.security == "tls" { + creds, err := credentials.NewServerTLSFromFile(tlsDir+"server1.pem", tlsDir+"server1.key") + if err != nil { + te.t.Fatalf("Failed to generate credentials %v", err) + } + sopts = append(sopts, grpc.Creds(creds)) + } + s := grpc.NewServer(sopts...) + te.srv = s + if e.httpHandler { + internal.TestingUseHandlerImpl(s) + } + if te.healthServer != nil { + healthpb.RegisterHealthServer(s, te.healthServer) + } + if te.testServer != nil { + testpb.RegisterTestServiceServer(s, te.testServer) + } + addr := la + switch e.network { + case "unix": + default: + _, port, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + te.t.Fatalf("Failed to parse listener address: %v", err) + } + addr = "localhost:" + port + } + + go s.Serve(lis) + te.srvAddr = addr +} + +func (te *test) clientConn() *grpc.ClientConn { + if te.cc != nil { + return te.cc + } + opts := []grpc.DialOption{ + grpc.WithDialer(te.e.dialer), + grpc.WithUserAgent(te.userAgent), + } + + if te.clientCompression { + opts = append(opts, + grpc.WithCompressor(grpc.NewGZIPCompressor()), + grpc.WithDecompressor(grpc.NewGZIPDecompressor()), + ) + } + if te.e.security == "tls" { + creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") + if err != nil { + te.t.Fatalf("Failed to load credentials: %v", err) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + var err error + te.cc, err = grpc.Dial(te.srvAddr, opts...) + if err != nil { + te.t.Fatalf("Dial(%q) = %v", te.srvAddr, err) + } + return te.cc +} + +func (te *test) declareLogNoise(phrases ...string) { + te.restoreLogs = declareLogNoise(te.t, phrases...) 
+} + +func (te *test) withServerTester(fn func(st *serverTester)) { + var c net.Conn + var err error + c, err = te.e.getDialer()(te.srvAddr, 10*time.Second) + if err != nil { + te.t.Fatal(err) + } + defer c.Close() + if te.e.security == "tls" { + c = tls.Client(c, &tls.Config{ + InsecureSkipVerify: true, + NextProtos: []string{http2.NextProtoTLS}, + }) + } + st := newServerTesterFromConn(te.t, c) + st.greet() + fn(st) +} + +func TestTimeoutOnDeadServer(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testTimeoutOnDeadServer(t, e) + } +} + +func testTimeoutOnDeadServer(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: Conn.transportMonitor exits due to: grpc: the client connection is closing", + "grpc: Conn.resetTransport failed to create client transport: connection error", + "grpc: Conn.resetTransport failed to create client transport: connection error: desc = \"transport: dial unix", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + te.srv.Stop() + ctx, _ := context.WithTimeout(context.Background(), time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded) + } + awaitNewConnLogOutput() +} + +func TestFailFast(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testFailFast(t, e) + } +} + +func testFailFast(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: Conn.transportMonitor exits due to: grpc: the client connection is closing", + "grpc: Conn.resetTransport failed to create client transport: connection error", + "grpc: Conn.resetTransport failed to create client transport: connection error: desc = \"transport: dial unix", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + // Stop the server and tear down all the exisiting connections. + te.srv.Stop() + // Loop until the server teardown is propagated to the client. + for { + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) == codes.Unavailable { + break + } + time.Sleep(10 * time.Millisecond) + } + // The client keeps reconnecting and ongoing fail-fast RPCs should fail with code.Unavailable. 
+ if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.Unavailable { + t.Fatalf("TestService/EmptyCall(_, _, _) = _, %v, want _, error code: %d", err, codes.Unavailable) + } + if _, err := tc.StreamingInputCall(context.Background()); grpc.Code(err) != codes.Unavailable { + t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want _, error code: %d", err, codes.Unavailable) + } + + awaitNewConnLogOutput() +} + +func healthCheck(d time.Duration, cc *grpc.ClientConn, serviceName string) (*healthpb.HealthCheckResponse, error) { + ctx, _ := context.WithTimeout(context.Background(), d) + hc := healthpb.NewHealthClient(cc) + req := &healthpb.HealthCheckRequest{ + Service: serviceName, + } + return hc.Check(ctx, req) +} + +func TestHealthCheckOnSuccess(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testHealthCheckOnSuccess(t, e) + } +} + +func testHealthCheckOnSuccess(t *testing.T, e env) { + te := newTest(t, e) + hs := health.NewHealthServer() + hs.SetServingStatus("grpc.health.v1.Health", 1) + te.healthServer = hs + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + if _, err := healthCheck(1*time.Second, cc, "grpc.health.v1.Health"); err != nil { + t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) + } +} + +func TestHealthCheckOnFailure(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testHealthCheckOnFailure(t, e) + } +} + +func testHealthCheckOnFailure(t *testing.T, e env) { + defer leakCheck(t)() + te := newTest(t, e) + te.declareLogNoise( + "Failed to dial ", + "grpc: the client connection is closing; please retry", + ) + hs := health.NewHealthServer() + hs.SetServingStatus("grpc.health.v1.HealthCheck", 1) + te.healthServer = hs + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + wantErr := grpc.Errorf(codes.DeadlineExceeded, "context deadline exceeded") + if _, err := healthCheck(0*time.Second, cc, "grpc.health.v1.Health"); !equalErrors(err, wantErr) { + t.Fatalf("Health/Check(_, _) = _, %v, want _, error code %d", err, codes.DeadlineExceeded) + } + awaitNewConnLogOutput() +} + +func TestHealthCheckOff(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + // TODO(bradfitz): Temporarily skip this env due to #619. 
+ if e.name == "handler-tls" { + continue + } + testHealthCheckOff(t, e) + } +} + +func testHealthCheckOff(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + want := grpc.Errorf(codes.Unimplemented, "unknown service grpc.health.v1.Health") + if _, err := healthCheck(1*time.Second, te.clientConn(), ""); !equalErrors(err, want) { + t.Fatalf("Health/Check(_, _) = _, %v, want _, %v", err, want) + } +} + +func TestHealthCheckServingStatus(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testHealthCheckServingStatus(t, e) + } +} + +func testHealthCheckServingStatus(t *testing.T, e env) { + te := newTest(t, e) + hs := health.NewHealthServer() + te.healthServer = hs + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + out, err := healthCheck(1*time.Second, cc, "") + if err != nil { + t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) + } + if out.Status != healthpb.HealthCheckResponse_SERVING { + t.Fatalf("Got the serving status %v, want SERVING", out.Status) + } + wantErr := grpc.Errorf(codes.NotFound, "unknown service") + if _, err := healthCheck(1*time.Second, cc, "grpc.health.v1.Health"); !equalErrors(err, wantErr) { + t.Fatalf("Health/Check(_, _) = _, %v, want _, error code %d", err, codes.NotFound) + } + hs.SetServingStatus("grpc.health.v1.Health", healthpb.HealthCheckResponse_SERVING) + out, err = healthCheck(1*time.Second, cc, "grpc.health.v1.Health") + if err != nil { + t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) + } + if out.Status != healthpb.HealthCheckResponse_SERVING { + t.Fatalf("Got the serving status %v, want SERVING", out.Status) + } + hs.SetServingStatus("grpc.health.v1.Health", healthpb.HealthCheckResponse_NOT_SERVING) + out, err = healthCheck(1*time.Second, cc, "grpc.health.v1.Health") + if err != nil { + t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) + } + if out.Status != healthpb.HealthCheckResponse_NOT_SERVING { + t.Fatalf("Got the serving status %v, want NOT_SERVING", out.Status) + } + +} + +func TestErrorChanNoIO(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testErrorChanNoIO(t, e) + } +} + +func testErrorChanNoIO(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + tc := testpb.NewTestServiceClient(te.clientConn()) + if _, err := tc.FullDuplexCall(context.Background()); err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } +} + +func TestEmptyUnaryWithUserAgent(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testEmptyUnaryWithUserAgent(t, e) + } +} + +func testEmptyUnaryWithUserAgent(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + var header metadata.MD + reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header)) + if err != nil || !proto.Equal(&testpb.Empty{}, reply) { + t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, ", reply, err, &testpb.Empty{}) + } + if v, ok := header["ua"]; !ok || v[0] != testAppUA { + t.Fatalf("header[\"ua\"] = %q, %t, want %q, true", v, ok, testAppUA) + } + + te.srv.Stop() +} + +func TestFailedEmptyUnary(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testFailedEmptyUnary(t, e) + } +} + +func 
testFailedEmptyUnary(t *testing.T, e env) {
+ te := newTest(t, e)
+ te.startServer(&testServer{security: e.security})
+ defer te.tearDown()
+ tc := testpb.NewTestServiceClient(te.clientConn())
+
+ ctx := metadata.NewContext(context.Background(), testMetadata)
+ wantErr := grpc.Errorf(codes.DataLoss, "missing expected user-agent")
+ if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !equalErrors(err, wantErr) {
+ t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr)
+ }
+}
+
+func TestLargeUnary(t *testing.T) {
+ defer leakCheck(t)()
+ for _, e := range listTestEnv() {
+ testLargeUnary(t, e)
+ }
+}
+
+func testLargeUnary(t *testing.T, e env) {
+ te := newTest(t, e)
+ te.startServer(&testServer{security: e.security})
+ defer te.tearDown()
+ tc := testpb.NewTestServiceClient(te.clientConn())
+
+ const argSize = 271828
+ const respSize = 314159
+
+ payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &testpb.SimpleRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseSize: proto.Int32(respSize),
+ Payload: payload,
+ }
+ reply, err := tc.UnaryCall(context.Background(), req)
+ if err != nil {
+ t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err)
+ }
+ pt := reply.GetPayload().GetType()
+ ps := len(reply.GetPayload().GetBody())
+ if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
+ t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
+ }
+}
+
+func TestMetadataUnaryRPC(t *testing.T) {
+ defer leakCheck(t)()
+ for _, e := range listTestEnv() {
+ testMetadataUnaryRPC(t, e)
+ }
+}
+
+func testMetadataUnaryRPC(t *testing.T, e env) {
+ te := newTest(t, e)
+ te.startServer(&testServer{security: e.security})
+ defer te.tearDown()
+ tc := testpb.NewTestServiceClient(te.clientConn())
+
+ const argSize = 2718
+ const respSize = 314
+
+ payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &testpb.SimpleRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseSize: proto.Int32(respSize),
+ Payload: payload,
+ }
+ var header, trailer metadata.MD
+ ctx := metadata.NewContext(context.Background(), testMetadata)
+ if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
+ t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err)
+ }
+ // Ignore optional response headers that Servers may set:
+ if header != nil {
+ delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers
+ delete(header, "date") // the Date header is also optional
+ }
+ if !reflect.DeepEqual(header, testMetadata) {
+ t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
+ }
+ if !reflect.DeepEqual(trailer, testTrailerMetadata) {
+ t.Fatalf("Received trailer metadata %v, want %v", trailer, testTrailerMetadata)
+ }
+}
+
+// TestMalformedHTTP2Metadata verifies the returned error when the client
+// sends malformed metadata.
+func TestMalformedHTTP2Metadata(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testMalformedHTTP2Metadata(t, e) + } +} + +func testMalformedHTTP2Metadata(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 2718) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(314), + Payload: payload, + } + ctx := metadata.NewContext(context.Background(), malformedHTTP2Metadata) + if _, err := tc.UnaryCall(ctx, req); grpc.Code(err) != codes.Internal { + t.Fatalf("TestService.UnaryCall(%v, _) = _, %v; want _, %q", ctx, err, codes.Internal) + } +} + +func performOneRPC(t *testing.T, tc testpb.TestServiceClient, wg *sync.WaitGroup) { + defer wg.Done() + const argSize = 2718 + const respSize = 314 + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Error(err) + return + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + reply, err := tc.UnaryCall(context.Background(), req, grpc.FailFast(false)) + if err != nil { + t.Errorf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + return + } + pt := reply.GetPayload().GetType() + ps := len(reply.GetPayload().GetBody()) + if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize { + t.Errorf("Got reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize) + return + } +} + +func TestRetry(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testRetry(t, e) + } +} + +// This test mimics a user who sends 1000 RPCs concurrently on a faulty transport. +// TODO(zhaoq): Refactor to make this clearer and add more cases to test racy +// and error-prone paths. +func testRetry(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise("transport: http2Client.notifyError got notified that the client transport was broken") + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + var wg sync.WaitGroup + + numRPC := 1000 + rpcSpacing := 2 * time.Millisecond + if raceMode { + // The race detector has a limit on how many goroutines it can track. + // This test is near the upper limit, and goes over the limit + // depending on the environment (the http.Handler environment uses + // more goroutines) + t.Logf("Shortening test in race mode.") + numRPC /= 2 + rpcSpacing *= 2 + } + + wg.Add(1) + go func() { + // Halfway through starting RPCs, kill all connections: + time.Sleep(time.Duration(numRPC/2) * rpcSpacing) + + // The server shuts down the network connection to make a + // transport error which will be detected by the client side + // code. + internal.TestingCloseConns(te.srv) + wg.Done() + }() + // All these RPCs should succeed eventually. + for i := 0; i < numRPC; i++ { + time.Sleep(rpcSpacing) + wg.Add(1) + go performOneRPC(t, tc, &wg) + } + wg.Wait() +} + +func TestRPCTimeout(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testRPCTimeout(t, e) + } +} + +// TODO(zhaoq): Have a better test coverage of timeout and cancellation mechanism. 
+func testRPCTimeout(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + const argSize = 2718 + const respSize = 314 + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + for i := -1; i <= 10; i++ { + ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond) + if _, err := tc.UnaryCall(ctx, req); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/UnaryCallv(_, _) = _, %v; want , error code: %d", err, codes.DeadlineExceeded) + } + } +} + +func TestCancel(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testCancel(t, e) + } +} + +func testCancel(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise("grpc: the client connection is closing; please retry") + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + const argSize = 2718 + const respSize = 314 + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + ctx, cancel := context.WithCancel(context.Background()) + time.AfterFunc(1*time.Millisecond, cancel) + if r, err := tc.UnaryCall(ctx, req); grpc.Code(err) != codes.Canceled { + t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want _, error code: %d", r, err, codes.Canceled) + } + awaitNewConnLogOutput() +} + +func TestCancelNoIO(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testCancelNoIO(t, e) + } +} + +func testCancelNoIO(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise("http2Client.notifyError got notified that the client transport was broken") + te.maxStream = 1 // Only allows 1 live stream per server transport. + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + // Start one blocked RPC for which we'll never send streaming + // input. This will consume the 1 maximum concurrent streams, + // causing future RPCs to hang. + ctx, cancelFirst := context.WithCancel(context.Background()) + _, err := tc.StreamingInputCall(ctx) + if err != nil { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) + } + + // Loop until the ClientConn receives the initial settings + // frame from the server, notifying it about the maximum + // concurrent streams. We know when it's received it because + // an RPC will fail with codes.DeadlineExceeded instead of + // succeeding. + // TODO(bradfitz): add internal test hook for this (Issue 534) + for { + ctx, cancelSecond := context.WithTimeout(context.Background(), 250*time.Millisecond) + _, err := tc.StreamingInputCall(ctx) + cancelSecond() + if err == nil { + time.Sleep(50 * time.Millisecond) + continue + } + if grpc.Code(err) == codes.DeadlineExceeded { + break + } + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded) + } + // If there are any RPCs in flight before the client receives + // the max streams setting, let them be expired. 
+ // TODO(bradfitz): add internal test hook for this (Issue 534)
+ time.Sleep(500 * time.Millisecond)
+
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+
+ // This should be blocked until the 1st is canceled.
+ ctx, cancelThird := context.WithTimeout(context.Background(), 2*time.Second)
+ if _, err := tc.StreamingInputCall(ctx); err != nil {
+ t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err)
+ }
+ cancelThird()
+ }()
+ cancelFirst()
+ <-ch
+}
+
+// The following tests the gRPC streaming RPC implementations.
+// TODO(zhaoq): Have better coverage on error cases.
+var (
+ reqSizes = []int{27182, 8, 1828, 45904}
+ respSizes = []int{31415, 9, 2653, 58979}
+)
+
+func TestNoService(t *testing.T) {
+ defer leakCheck(t)()
+ for _, e := range listTestEnv() {
+ testNoService(t, e)
+ }
+}
+
+func testNoService(t *testing.T, e env) {
+ te := newTest(t, e)
+ te.startServer(nil)
+ defer te.tearDown()
+
+ cc := te.clientConn()
+ tc := testpb.NewTestServiceClient(cc)
+
+ stream, err := tc.FullDuplexCall(te.ctx, grpc.FailFast(false))
+ if err != nil {
+ t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
+ }
+ if _, err := stream.Recv(); grpc.Code(err) != codes.Unimplemented {
+ t.Fatalf("stream.Recv() = _, %v, want _, error code %d", err, codes.Unimplemented)
+ }
+}
+
+func TestPingPong(t *testing.T) {
+ defer leakCheck(t)()
+ for _, e := range listTestEnv() {
+ testPingPong(t, e)
+ }
+}
+
+func testPingPong(t *testing.T, e env) {
+ te := newTest(t, e)
+ te.startServer(&testServer{security: e.security})
+ defer te.tearDown()
+ tc := testpb.NewTestServiceClient(te.clientConn())
+
+ stream, err := tc.FullDuplexCall(te.ctx)
+ if err != nil {
+ t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
+ }
+ var index int
+ for index < len(reqSizes) {
+ respParam := []*testpb.ResponseParameters{
+ {
+ Size: proto.Int32(int32(respSizes[index])),
+ },
+ }
+
+ payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req := &testpb.StreamingOutputCallRequest{
+ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+ ResponseParameters: respParam,
+ Payload: payload,
+ }
+ if err := stream.Send(req); err != nil {
+ t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err)
+ }
+ reply, err := stream.Recv()
+ if err != nil {
+ t.Fatalf("%v.Recv() = %v, want ", stream, err)
+ }
+ pt := reply.GetPayload().GetType()
+ if pt != testpb.PayloadType_COMPRESSABLE {
+ t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
+ }
+ size := len(reply.GetPayload().GetBody())
+ if size != int(respSizes[index]) {
+ t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
+ }
+ index++
+ }
+ if err := stream.CloseSend(); err != nil {
+ t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
+ }
+ if _, err := stream.Recv(); err != io.EOF {
+ t.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
+ }
+}
+
+func TestMetadataStreamingRPC(t *testing.T) {
+ defer leakCheck(t)()
+ for _, e := range listTestEnv() {
+ testMetadataStreamingRPC(t, e)
+ }
+}
+
+func testMetadataStreamingRPC(t *testing.T, e env) {
+ te := newTest(t, e)
+ te.startServer(&testServer{security: e.security})
+ defer te.tearDown()
+ tc := testpb.NewTestServiceClient(te.clientConn())
+
+ ctx := metadata.NewContext(te.ctx, testMetadata)
+ stream, err := tc.FullDuplexCall(ctx)
+ if err != nil {
+ t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
+ }
+ go func() {
+ headerMD, err := stream.Header()
+ if
e.security == "tls" { + delete(headerMD, "transport_security_type") + } + delete(headerMD, "trailer") // ignore if present + if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { + t.Errorf("#1 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) + } + // test the cached value. + headerMD, err = stream.Header() + delete(headerMD, "trailer") // ignore if present + if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { + t.Errorf("#2 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) + } + var index int + for index < len(reqSizes) { + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(respSizes[index])), + }, + } + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])) + if err != nil { + t.Fatal(err) + } + + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(req); err != nil { + t.Errorf("%v.Send(%v) = %v, want ", stream, req, err) + return + } + index++ + } + // Tell the server we're done sending args. + stream.CloseSend() + }() + for { + if _, err := stream.Recv(); err != nil { + break + } + } + trailerMD := stream.Trailer() + if !reflect.DeepEqual(testMetadata, trailerMD) { + t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testMetadata) + } +} + +func TestServerStreaming(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testServerStreaming(t, e) + } +} + +func testServerStreaming(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + respParam := make([]*testpb.ResponseParameters, len(respSizes)) + for i, s := range respSizes { + respParam[i] = &testpb.ResponseParameters{ + Size: proto.Int32(int32(s)), + } + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + } + stream, err := tc.StreamingOutputCall(context.Background(), req) + if err != nil { + t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want ", tc, err) + } + var rpcStatus error + var respCnt int + var index int + for { + reply, err := stream.Recv() + if err != nil { + rpcStatus = err + break + } + pt := reply.GetPayload().GetType() + if pt != testpb.PayloadType_COMPRESSABLE { + t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE) + } + size := len(reply.GetPayload().GetBody()) + if size != int(respSizes[index]) { + t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) + } + index++ + respCnt++ + } + if rpcStatus != io.EOF { + t.Fatalf("Failed to finish the server streaming rpc: %v, want ", rpcStatus) + } + if respCnt != len(respSizes) { + t.Fatalf("Got %d reply, want %d", len(respSizes), respCnt) + } +} + +func TestFailedServerStreaming(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testFailedServerStreaming(t, e) + } +} + +func testFailedServerStreaming(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + respParam := make([]*testpb.ResponseParameters, len(respSizes)) + for i, s := range respSizes { + respParam[i] = &testpb.ResponseParameters{ + Size: proto.Int32(int32(s)), + } + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: 
testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + } + ctx := metadata.NewContext(te.ctx, testMetadata) + stream, err := tc.StreamingOutputCall(ctx, req) + if err != nil { + t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want ", tc, err) + } + wantErr := grpc.Errorf(codes.DataLoss, "got extra metadata") + if _, err := stream.Recv(); !equalErrors(err, wantErr) { + t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, wantErr) + } +} + +// concurrentSendServer is a TestServiceServer whose +// StreamingOutputCall makes ten serial Send calls, sending payloads +// "0".."9", inclusive. TestServerStreaming_Concurrent verifies they +// were received in the correct order, and that there were no races. +// +// All other TestServiceServer methods crash if called. +type concurrentSendServer struct { + testpb.TestServiceServer +} + +func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { + for i := 0; i < 10; i++ { + stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: &testpb.Payload{ + Body: []byte{'0' + uint8(i)}, + }, + }) + } + return nil +} + +// Tests doing a bunch of concurrent streaming output calls. +func TestServerStreaming_Concurrent(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testServerStreaming_Concurrent(t, e) + } +} + +func testServerStreaming_Concurrent(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(concurrentSendServer{}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + doStreamingCall := func() { + req := &testpb.StreamingOutputCallRequest{} + stream, err := tc.StreamingOutputCall(context.Background(), req) + if err != nil { + t.Errorf("%v.StreamingOutputCall(_) = _, %v, want ", tc, err) + return + } + var ngot int + var buf bytes.Buffer + for { + reply, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + ngot++ + if buf.Len() > 0 { + buf.WriteByte(',') + } + buf.Write(reply.GetPayload().GetBody()) + } + if want := 10; ngot != want { + t.Errorf("Got %d replies, want %d", ngot, want) + } + if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want { + t.Errorf("Got replies %q; want %q", got, want) + } + } + + var wg sync.WaitGroup + for i := 0; i < 20; i++ { + wg.Add(1) + go func() { + defer wg.Done() + doStreamingCall() + }() + } + wg.Wait() + +} + +func TestClientStreaming(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testClientStreaming(t, e) + } +} + +func testClientStreaming(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + stream, err := tc.StreamingInputCall(te.ctx) + if err != nil { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want ", tc, err) + } + + var sum int + for _, s := range reqSizes { + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s)) + if err != nil { + t.Fatal(err) + } + + req := &testpb.StreamingInputCallRequest{ + Payload: payload, + } + if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + sum += s + } + reply, err := stream.CloseAndRecv() + if err != nil { + t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + } + if reply.GetAggregatedPayloadSize() != int32(sum) { + t.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, 
reply.GetAggregatedPayloadSize(), sum) + } +} + +func TestClientStreamingError(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testClientStreamingError(t, e) + } +} + +func testClientStreamingError(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security, earlyFail: true}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + stream, err := tc.StreamingInputCall(te.ctx) + if err != nil { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want ", tc, err) + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1) + if err != nil { + t.Fatal(err) + } + + req := &testpb.StreamingInputCallRequest{ + Payload: payload, + } + // The 1st request should go through. + if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + for { + if err := stream.Send(req); err != io.EOF { + continue + } + if _, err := stream.CloseAndRecv(); grpc.Code(err) != codes.NotFound { + t.Fatalf("%v.CloseAndRecv() = %v, want error %d", stream, err, codes.NotFound) + } + break + } +} + +func TestExceedMaxStreamsLimit(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testExceedMaxStreamsLimit(t, e) + } +} + +func testExceedMaxStreamsLimit(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise( + "http2Client.notifyError got notified that the client transport was broken", + "Conn.resetTransport failed to create client transport", + "grpc: the client connection is closing", + ) + te.maxStream = 1 // Only allows 1 live stream per server transport. + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + _, err := tc.StreamingInputCall(te.ctx) + if err != nil { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) + } + // Loop until receiving the new max stream setting from the server. + for { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + _, err := tc.StreamingInputCall(ctx) + if err == nil { + time.Sleep(time.Second) + continue + } + if grpc.Code(err) == codes.DeadlineExceeded { + break + } + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded) + } +} + +func TestStreamsQuotaRecovery(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testStreamsQuotaRecovery(t, e) + } +} + +func testStreamsQuotaRecovery(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise( + "http2Client.notifyError got notified that the client transport was broken", + "Conn.resetTransport failed to create client transport", + "grpc: the client connection is closing", + ) + te.maxStream = 1 // Allows 1 live stream. + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + ctx, cancel := context.WithCancel(context.Background()) + if _, err := tc.StreamingInputCall(ctx); err != nil { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) + } + // Loop until the new max stream setting is effective. 
+ for { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + _, err := tc.StreamingInputCall(ctx) + if err == nil { + time.Sleep(time.Second) + continue + } + if grpc.Code(err) == codes.DeadlineExceeded { + break + } + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %d", tc, err, codes.DeadlineExceeded) + } + cancel() + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + ctx, cancel := context.WithCancel(context.Background()) + if _, err := tc.StreamingInputCall(ctx); err != nil { + t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) + } + cancel() + }() + } + wg.Wait() +} + +func TestCompressServerHasNoSupport(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testCompressServerHasNoSupport(t, e) + } +} + +func testCompressServerHasNoSupport(t *testing.T, e env) { + te := newTest(t, e) + te.serverCompression = false + te.clientCompression = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const argSize = 271828 + const respSize = 314159 + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.Unimplemented { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %d", err, codes.Unimplemented) + } + // Streaming RPC + stream, err := tc.FullDuplexCall(context.Background()) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(31415), + }, + } + payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) + if err != nil { + t.Fatal(err) + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.Unimplemented { + t.Fatalf("%v.Recv() = %v, want error code %d", stream, err, codes.Unimplemented) + } +} + +func TestCompressOK(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testCompressOK(t, e) + } +} + +func testCompressOK(t *testing.T, e env) { + te := newTest(t, e) + te.serverCompression = true + te.clientCompression = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + // Unary call + const argSize = 271828 + const respSize = 314159 + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + ctx := metadata.NewContext(context.Background(), metadata.Pairs("something", "something")) + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + } + // Streaming RPC + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want 
", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(31415), + }, + } + payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) + if err != nil { + t.Fatal(err) + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = %v, want ", stream, err) + } +} + +func TestUnaryServerInterceptor(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testUnaryServerInterceptor(t, e) + } +} + +func errInjector(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return nil, grpc.Errorf(codes.PermissionDenied, "") +} + +func testUnaryServerInterceptor(t *testing.T, e env) { + te := newTest(t, e) + te.unaryInt = errInjector + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + tc := testpb.NewTestServiceClient(te.clientConn()) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.PermissionDenied { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %d", tc, err, codes.PermissionDenied) + } +} + +func TestStreamServerInterceptor(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + // TODO(bradfitz): Temporarily skip this env due to #619. + if e.name == "handler-tls" { + continue + } + testStreamServerInterceptor(t, e) + } +} + +func fullDuplexOnly(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if info.FullMethod == "/grpc.testing.TestService/FullDuplexCall" { + return handler(srv, ss) + } + // Reject the other methods. + return grpc.Errorf(codes.PermissionDenied, "") +} + +func testStreamServerInterceptor(t *testing.T, e env) { + te := newTest(t, e) + te.streamInt = fullDuplexOnly + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + tc := testpb.NewTestServiceClient(te.clientConn()) + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(1)), + }, + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + s1, err := tc.StreamingOutputCall(context.Background(), req) + if err != nil { + t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, ", tc, err) + } + if _, err := s1.Recv(); grpc.Code(err) != codes.PermissionDenied { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, error code %d", tc, err, codes.PermissionDenied) + } + s2, err := tc.FullDuplexCall(context.Background()) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := s2.Send(req); err != nil { + t.Fatalf("%v.Send(_) = %v, want ", s2, err) + } + if _, err := s2.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want _, ", s2, err) + } +} + +// funcServer implements methods of TestServiceServer using funcs, +// similar to an http.HandlerFunc. +// Any unimplemented method will crash. Tests implement the method(s) +// they need. 
+type funcServer struct {
+ testpb.TestServiceServer
+ unaryCall func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error)
+ streamingInputCall func(stream testpb.TestService_StreamingInputCallServer) error
+}
+
+func (s *funcServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+ return s.unaryCall(ctx, in)
+}
+
+func (s *funcServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
+ return s.streamingInputCall(stream)
+}
+
+func TestClientRequestBodyError_UnexpectedEOF(t *testing.T) {
+ defer leakCheck(t)()
+ for _, e := range listTestEnv() {
+ testClientRequestBodyError_UnexpectedEOF(t, e)
+ }
+}
+
+func testClientRequestBodyError_UnexpectedEOF(t *testing.T, e env) {
+ te := newTest(t, e)
+ ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+ errUnexpectedCall := errors.New("unexpected call func server method")
+ t.Error(errUnexpectedCall)
+ return nil, errUnexpectedCall
+ }}
+ te.startServer(ts)
+ defer te.tearDown()
+ te.withServerTester(func(st *serverTester) {
+ st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall")
+ // Say we have 5 bytes coming, but set END_STREAM flag:
+ st.writeData(1, true, []byte{0, 0, 0, 0, 5})
+ st.wantAnyFrame() // wait for server to crash (it used to crash)
+ })
+}
+
+func TestClientRequestBodyError_CloseAfterLength(t *testing.T) {
+ defer leakCheck(t)()
+ for _, e := range listTestEnv() {
+ testClientRequestBodyError_CloseAfterLength(t, e)
+ }
+}
+
+func testClientRequestBodyError_CloseAfterLength(t *testing.T, e env) {
+ te := newTest(t, e)
+ te.declareLogNoise("Server.processUnaryRPC failed to write status")
+ ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+ errUnexpectedCall := errors.New("unexpected call func server method")
+ t.Error(errUnexpectedCall)
+ return nil, errUnexpectedCall
+ }}
+ te.startServer(ts)
+ defer te.tearDown()
+ te.withServerTester(func(st *serverTester) {
+ st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall")
+ // say we're sending 5 bytes, but then close the connection instead.
+ st.writeData(1, false, []byte{0, 0, 0, 0, 5})
+ st.cc.Close()
+ })
+}
+
+func TestClientRequestBodyError_Cancel(t *testing.T) {
+ defer leakCheck(t)()
+ for _, e := range listTestEnv() {
+ testClientRequestBodyError_Cancel(t, e)
+ }
+}
+
+func testClientRequestBodyError_Cancel(t *testing.T, e env) {
+ te := newTest(t, e)
+ gotCall := make(chan bool, 1)
+ ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+ gotCall <- true
+ return new(testpb.SimpleResponse), nil
+ }}
+ te.startServer(ts)
+ defer te.tearDown()
+ te.withServerTester(func(st *serverTester) {
+ st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall")
+ // Say we have 5 bytes coming, but cancel it instead.
+ st.writeData(1, false, []byte{0, 0, 0, 0, 5})
+ st.writeRSTStream(1, http2.ErrCodeCancel)
+
+ // Verify we didn't get a call yet.
+ select {
+ case <-gotCall:
+ t.Fatal("unexpected call")
+ default:
+ }
+
+ // And now send an uncanceled (but still invalid) request, just to get a response.
+ st.writeHeadersGRPC(3, "/grpc.testing.TestService/UnaryCall") + st.writeData(3, true, []byte{0, 0, 0, 0, 0}) + <-gotCall + st.wantAnyFrame() + }) +} + +func TestClientRequestBodyError_Cancel_StreamingInput(t *testing.T) { + defer leakCheck(t)() + for _, e := range listTestEnv() { + testClientRequestBodyError_Cancel_StreamingInput(t, e) + } +} + +func testClientRequestBodyError_Cancel_StreamingInput(t *testing.T, e env) { + te := newTest(t, e) + recvErr := make(chan error, 1) + ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error { + _, err := stream.Recv() + recvErr <- err + return nil + }} + te.startServer(ts) + defer te.tearDown() + te.withServerTester(func(st *serverTester) { + st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall") + // Say we have 5 bytes coming, but cancel it instead. + st.writeData(1, false, []byte{0, 0, 0, 0, 5}) + st.writeRSTStream(1, http2.ErrCodeCancel) + + var got error + select { + case got = <-recvErr: + case <-time.After(3 * time.Second): + t.Fatal("timeout waiting for error") + } + if se, ok := got.(transport.StreamError); !ok || se.Code != codes.Canceled { + t.Errorf("error = %#v; want transport.StreamError with code Canceled", got) + } + }) +} + +// interestingGoroutines returns all goroutines we care about for the purpose +// of leak checking. It excludes testing or runtime ones. +func interestingGoroutines() (gs []string) { + buf := make([]byte, 2<<20) + buf = buf[:runtime.Stack(buf, true)] + for _, g := range strings.Split(string(buf), "\n\n") { + sl := strings.SplitN(g, "\n", 2) + if len(sl) != 2 { + continue + } + stack := strings.TrimSpace(sl[1]) + if strings.HasPrefix(stack, "testing.RunTests") { + continue + } + + if stack == "" || + strings.Contains(stack, "testing.Main(") || + strings.Contains(stack, "runtime.goexit") || + strings.Contains(stack, "created by runtime.gc") || + strings.Contains(stack, "created by google3/base/go/log.init") || + strings.Contains(stack, "interestingGoroutines") || + strings.Contains(stack, "runtime.MHeap_Scavenger") || + strings.Contains(stack, "signal.signal_recv") || + strings.Contains(stack, "sigterm.handler") || + strings.Contains(stack, "runtime_mcall") || + strings.Contains(stack, "goroutine in C code") { + continue + } + gs = append(gs, g) + } + sort.Strings(gs) + return +} + +// leakCheck snapshots the currently-running goroutines and returns a +// function to be run at the end of tests to see whether any +// goroutines leaked. +func leakCheck(t testing.TB) func() { + orig := map[string]bool{} + for _, g := range interestingGoroutines() { + orig[g] = true + } + return func() { + // Loop, waiting for goroutines to shut down. + // Wait up to 5 seconds, but finish as quickly as possible. 
+ deadline := time.Now().Add(5 * time.Second) + for { + var leaked []string + for _, g := range interestingGoroutines() { + if !orig[g] { + leaked = append(leaked, g) + } + } + if len(leaked) == 0 { + return + } + if time.Now().Before(deadline) { + time.Sleep(50 * time.Millisecond) + continue + } + for _, g := range leaked { + t.Errorf("Leaked goroutine: %v", g) + } + return + } + } +} + +type lockingWriter struct { + mu sync.Mutex + w io.Writer +} + +func (lw *lockingWriter) Write(p []byte) (n int, err error) { + lw.mu.Lock() + defer lw.mu.Unlock() + return lw.w.Write(p) +} + +func (lw *lockingWriter) setWriter(w io.Writer) { + lw.mu.Lock() + defer lw.mu.Unlock() + lw.w = w +} + +var testLogOutput = &lockingWriter{w: os.Stderr} + +// awaitNewConnLogOutput waits for any of grpc.NewConn's goroutines to +// terminate, if they're still running. It spams logs with this +// message. We wait for it so our log filter is still +// active. Otherwise the "defer restore()" at the top of various test +// functions restores our log filter and then the goroutine spams. +func awaitNewConnLogOutput() { + awaitLogOutput(50*time.Millisecond, "grpc: the client connection is closing; please retry") +} + +func awaitLogOutput(maxWait time.Duration, phrase string) { + pb := []byte(phrase) + + timer := time.NewTimer(maxWait) + defer timer.Stop() + wakeup := make(chan bool, 1) + for { + if logOutputHasContents(pb, wakeup) { + return + } + select { + case <-timer.C: + // Too slow. Oh well. + return + case <-wakeup: + } + } +} + +func logOutputHasContents(v []byte, wakeup chan<- bool) bool { + testLogOutput.mu.Lock() + defer testLogOutput.mu.Unlock() + fw, ok := testLogOutput.w.(*filterWriter) + if !ok { + return false + } + fw.mu.Lock() + defer fw.mu.Unlock() + if bytes.Contains(fw.buf.Bytes(), v) { + return true + } + fw.wakeup = wakeup + return false +} + +func init() { + grpclog.SetLogger(log.New(testLogOutput, "", log.LstdFlags)) +} + +var verboseLogs = flag.Bool("verbose_logs", false, "show all grpclog output, without filtering") + +func noop() {} + +// declareLogNoise declares that t is expected to emit the following noisy phrases, +// even on success. Those phrases will be filtered from grpclog output +// and only be shown if *verbose_logs or t ends up failing. +// The returned restore function should be called with defer to be run +// before the test ends. 
+func declareLogNoise(t *testing.T, phrases ...string) (restore func()) { + if *verboseLogs { + return noop + } + fw := &filterWriter{dst: os.Stderr, filter: phrases} + testLogOutput.setWriter(fw) + return func() { + if t.Failed() { + fw.mu.Lock() + defer fw.mu.Unlock() + if fw.buf.Len() > 0 { + t.Logf("Complete log output:\n%s", fw.buf.Bytes()) + } + } + testLogOutput.setWriter(os.Stderr) + } +} + +type filterWriter struct { + dst io.Writer + filter []string + + mu sync.Mutex + buf bytes.Buffer + wakeup chan<- bool // if non-nil, gets true on write +} + +func (fw *filterWriter) Write(p []byte) (n int, err error) { + fw.mu.Lock() + fw.buf.Write(p) + if fw.wakeup != nil { + select { + case fw.wakeup <- true: + default: + } + } + fw.mu.Unlock() + + ps := string(p) + for _, f := range fw.filter { + if strings.Contains(ps, f) { + return len(p), nil + } + } + return fw.dst.Write(p) +} + +func equalErrors(l, r error) bool { + return grpc.Code(l) == grpc.Code(r) && grpc.ErrorDesc(l) == grpc.ErrorDesc(r) +} diff --git a/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go new file mode 100644 index 00000000000..0ceb12df561 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go @@ -0,0 +1,788 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + Empty + Payload + SimpleRequest + SimpleResponse + StreamingInputCallRequest + StreamingInputCallResponse + ResponseParameters + StreamingOutputCallRequest + StreamingOutputCallResponse +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The type of payload that should be returned. +type PayloadType int32 + +const ( + // Compressable text format. + PayloadType_COMPRESSABLE PayloadType = 0 + // Uncompressable binary format. + PayloadType_UNCOMPRESSABLE PayloadType = 1 + // Randomly chosen from all other formats defined in this enum. 
+ PayloadType_RANDOM PayloadType = 2 +) + +var PayloadType_name = map[int32]string{ + 0: "COMPRESSABLE", + 1: "UNCOMPRESSABLE", + 2: "RANDOM", +} +var PayloadType_value = map[string]int32{ + "COMPRESSABLE": 0, + "UNCOMPRESSABLE": 1, + "RANDOM": 2, +} + +func (x PayloadType) Enum() *PayloadType { + p := new(PayloadType) + *p = x + return p +} +func (x PayloadType) String() string { + return proto.EnumName(PayloadType_name, int32(x)) +} +func (x *PayloadType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType") + if err != nil { + return err + } + *x = PayloadType(value) + return nil +} +func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// A block of data, to simply increase gRPC message size. +type Payload struct { + // The type of data in body. + Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + // Primary contents of payload. + Body []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Payload) Reset() { *m = Payload{} } +func (m *Payload) String() string { return proto.CompactTextString(m) } +func (*Payload) ProtoMessage() {} +func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Payload) GetType() PayloadType { + if m != nil && m.Type != nil { + return *m.Type + } + return PayloadType_COMPRESSABLE +} + +func (m *Payload) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +// Unary request. +type SimpleRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Whether SimpleResponse should include username. + FillUsername *bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"` + // Whether SimpleResponse should include OAuth scope. 
+ FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} +func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *SimpleRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *SimpleRequest) GetResponseSize() int32 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *SimpleRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleRequest) GetFillUsername() bool { + if m != nil && m.FillUsername != nil { + return *m.FillUsername + } + return false +} + +func (m *SimpleRequest) GetFillOauthScope() bool { + if m != nil && m.FillOauthScope != nil { + return *m.FillOauthScope + } + return false +} + +// Unary response, as configured by the request. +type SimpleResponse struct { + // Payload to increase message size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + // The user the request came from, for verifying authentication was + // successful when the client expected it. + Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` + // OAuth scope. + OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} +func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *SimpleResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleResponse) GetUsername() string { + if m != nil && m.Username != nil { + return *m.Username + } + return "" +} + +func (m *SimpleResponse) GetOauthScope() string { + if m != nil && m.OauthScope != nil { + return *m.OauthScope + } + return "" +} + +// Client-streaming request. +type StreamingInputCallRequest struct { + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } +func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallRequest) ProtoMessage() {} +func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *StreamingInputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Client-streaming response. +type StreamingInputCallResponse struct { + // Aggregated size of payloads received from the client. 
+ AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } +func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallResponse) ProtoMessage() {} +func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { + if m != nil && m.AggregatedPayloadSize != nil { + return *m.AggregatedPayloadSize + } + return 0 +} + +// Configuration for a particular response. +type ResponseParameters struct { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` + // Desired interval between consecutive responses in the response stream in + // microseconds. + IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } +func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } +func (*ResponseParameters) ProtoMessage() {} +func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ResponseParameters) GetSize() int32 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *ResponseParameters) GetIntervalUs() int32 { + if m != nil && m.IntervalUs != nil { + return *m.IntervalUs + } + return 0 +} + +// Server-streaming request. +type StreamingOutputCallRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Configuration for each expected response message. + ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } +func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallRequest) ProtoMessage() {} +func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { + if m != nil { + return m.ResponseParameters + } + return nil +} + +func (m *StreamingOutputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Server-streaming response, as configured by the request and parameters. 
+type StreamingOutputCallResponse struct { + // Payload to increase response size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } +func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallResponse) ProtoMessage() {} +func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *StreamingOutputCallResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func init() { + proto.RegisterType((*Empty)(nil), "grpc.testing.Empty") + proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") + proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") + proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") + proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") + proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") + proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") + proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") + proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") + proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for TestService service + +type TestServiceClient interface { + // One empty request followed by one empty response. + EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) +} + +type testServiceClient struct { + cc *grpc.ClientConn +} + +func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { + return &testServiceClient{cc} +} + +func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceStreamingOutputCallClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TestService_StreamingOutputCallClient interface { + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingOutputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceStreamingInputCallClient{stream} + return x, nil +} + +type TestService_StreamingInputCallClient interface { + Send(*StreamingInputCallRequest) error + CloseAndRecv() (*StreamingInputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingInputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(StreamingInputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) 
+ if err != nil { + return nil, err + } + x := &testServiceFullDuplexCallClient{stream} + return x, nil +} + +type TestService_FullDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceFullDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceHalfDuplexCallClient{stream} + return x, nil +} + +type TestService_HalfDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceHalfDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for TestService service + +type TestServiceServer interface { + // One empty request followed by one empty response. + EmptyCall(context.Context, *Empty) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + StreamingInputCall(TestService_StreamingInputCallServer) error + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(TestService_FullDuplexCallServer) error + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ HalfDuplexCall(TestService_HalfDuplexCallServer) error +} + +func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { + s.RegisterService(&_TestService_serviceDesc, srv) +} + +func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServiceServer).EmptyCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.TestService/EmptyCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServiceServer).UnaryCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.TestService/UnaryCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamingOutputCallRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) +} + +type TestService_StreamingOutputCallServer interface { + Send(*StreamingOutputCallResponse) error + grpc.ServerStream +} + +type testServiceStreamingOutputCallServer struct { + grpc.ServerStream +} + +func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) +} + +type TestService_StreamingInputCallServer interface { + SendAndClose(*StreamingInputCallResponse) error + Recv() (*StreamingInputCallRequest, error) + grpc.ServerStream +} + +type testServiceStreamingInputCallServer struct { + grpc.ServerStream +} + +func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { + m := new(StreamingInputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) +} + +type TestService_FullDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceFullDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { 
+ m := new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) +} + +type TestService_HalfDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceHalfDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { + m := new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TestService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.TestService", + HandlerType: (*TestServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "EmptyCall", + Handler: _TestService_EmptyCall_Handler, + }, + { + MethodName: "UnaryCall", + Handler: _TestService_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingOutputCall", + Handler: _TestService_StreamingOutputCall_Handler, + ServerStreams: true, + }, + { + StreamName: "StreamingInputCall", + Handler: _TestService_StreamingInputCall_Handler, + ClientStreams: true, + }, + { + StreamName: "FullDuplexCall", + Handler: _TestService_FullDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "HalfDuplexCall", + Handler: _TestService_HalfDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("test.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 567 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x54, 0x51, 0x6f, 0xd2, 0x50, + 0x14, 0xb6, 0x03, 0x64, 0x1c, 0x58, 0x43, 0x0e, 0x59, 0x64, 0x9d, 0x89, 0x4b, 0x7d, 0xb0, 0x9a, + 0x88, 0x86, 0x44, 0x1f, 0x35, 0x73, 0x63, 0x71, 0x09, 0x03, 0x6c, 0xe1, 0x99, 0x5c, 0xe1, 0x0e, + 0x9b, 0x94, 0xb6, 0xb6, 0xb7, 0x46, 0x7c, 0xf0, 0x8f, 0xf9, 0x67, 0xfc, 0x11, 0xfe, 0x00, 0xef, + 0xbd, 0x6d, 0xa1, 0x40, 0x17, 0x99, 0xc6, 0xbd, 0xb5, 0xdf, 0xf9, 0xce, 0x77, 0xbe, 0xef, 0x9e, + 0xdb, 0x02, 0x30, 0x1a, 0xb2, 0x96, 0x1f, 0x78, 0xcc, 0xc3, 0xda, 0x2c, 0xf0, 0x27, 0x2d, 0x01, + 0xd8, 0xee, 0x4c, 0x2f, 0x43, 0xa9, 0x33, 0xf7, 0xd9, 0x42, 0xef, 0x42, 0x79, 0x40, 0x16, 0x8e, + 0x47, 0xa6, 0xf8, 0x1c, 0x8a, 0x6c, 0xe1, 0xd3, 0xa6, 0x72, 0xa2, 0x18, 0x6a, 0xfb, 0xa8, 0x95, + 0x6d, 0x68, 0x25, 0xa4, 0x21, 0x27, 0x98, 0x92, 0x86, 0x08, 0xc5, 0x8f, 0xde, 0x74, 0xd1, 0xdc, + 0xe3, 0xf4, 0x9a, 0x29, 0x9f, 0xf5, 0x5f, 0x0a, 0x1c, 0x58, 0xf6, 0xdc, 0x77, 0xa8, 0x49, 0x3f, + 0x47, 0xbc, 0x15, 0xdf, 0xc0, 0x41, 0x40, 0x43, 0xdf, 0x73, 0x43, 0x3a, 0xde, 0x4d, 0xbd, 0x96, + 0xf2, 0xc5, 0x1b, 0x3e, 0xce, 0xf4, 0x87, 0xf6, 0x37, 0x2a, 0xc7, 0x95, 0x56, 0x24, 0x8b, 0x63, + 0xf8, 0x02, 0xca, 0x7e, 0xac, 0xd0, 0x2c, 0xf0, 0x72, 0xb5, 0x7d, 0x98, 0x2b, 0x6f, 0xa6, 0x2c, + 0xa1, 0x7a, 0x6d, 0x3b, 0xce, 0x38, 0x0a, 0x69, 0xe0, 0x92, 0x39, 0x6d, 0x16, 0x79, 0xdb, 0xbe, + 0x59, 0x13, 0xe0, 0x28, 0xc1, 0xd0, 0x80, 0xba, 0x24, 0x79, 0x24, 0x62, 0x9f, 0xc6, 0xe1, 0xc4, + 0xe3, 0xee, 0x4b, 0x92, 0xa7, 0x0a, 0xbc, 0x2f, 0x60, 0x4b, 0xa0, 0xfa, 0x77, 0x50, 0xd3, 0xd4, + 0xb1, 0xab, 0xac, 
0x23, 0x65, 0x27, 0x47, 0x1a, 0xec, 0x2f, 0xcd, 0x88, 0x88, 0x15, 0x73, 0xf9, + 0x8e, 0x8f, 0xa0, 0x9a, 0xf5, 0x50, 0x90, 0x65, 0xf0, 0x56, 0xf3, 0xbb, 0x70, 0x64, 0xb1, 0x80, + 0x92, 0x39, 0x97, 0xbe, 0x74, 0xfd, 0x88, 0x9d, 0x11, 0xc7, 0x49, 0x37, 0x70, 0x5b, 0x2b, 0xfa, + 0x10, 0xb4, 0x3c, 0xb5, 0x24, 0xd9, 0x6b, 0x78, 0x40, 0x66, 0xb3, 0x80, 0xce, 0x08, 0xa3, 0xd3, + 0x71, 0xd2, 0x13, 0xaf, 0x46, 0x91, 0xab, 0x39, 0x5c, 0x95, 0x13, 0x69, 0xb1, 0x23, 0xfd, 0x12, + 0x30, 0xd5, 0x18, 0x90, 0x80, 0xc7, 0x62, 0x34, 0x08, 0xc5, 0x25, 0xca, 0xb4, 0xca, 0x67, 0x11, + 0xd7, 0x76, 0x79, 0xf5, 0x0b, 0x11, 0x0b, 0x4a, 0x16, 0x0e, 0x29, 0x34, 0x0a, 0xf5, 0x9f, 0x4a, + 0xc6, 0x61, 0x3f, 0x62, 0x1b, 0x81, 0xff, 0xf5, 0xca, 0x7d, 0x80, 0xc6, 0xb2, 0xdf, 0x5f, 0x5a, + 0xe5, 0x3e, 0x0a, 0xfc, 0xf0, 0x4e, 0xd6, 0x55, 0xb6, 0x23, 0x99, 0x18, 0x6c, 0xc7, 0xbc, 0xed, + 0x05, 0xd5, 0x7b, 0x70, 0x9c, 0x9b, 0xf0, 0x2f, 0xaf, 0xd7, 0xb3, 0xb7, 0x50, 0xcd, 0x04, 0xc6, + 0x3a, 0xd4, 0xce, 0xfa, 0x57, 0x03, 0xb3, 0x63, 0x59, 0xa7, 0xef, 0xba, 0x9d, 0xfa, 0x3d, 0xbe, + 0x08, 0x75, 0xd4, 0x5b, 0xc3, 0x14, 0x04, 0xb8, 0x6f, 0x9e, 0xf6, 0xce, 0xfb, 0x57, 0xf5, 0xbd, + 0xf6, 0x8f, 0x22, 0x54, 0x87, 0x5c, 0xdd, 0xe2, 0x4b, 0xb0, 0x27, 0x14, 0x5f, 0x41, 0x45, 0xfe, + 0x40, 0x84, 0x2d, 0x6c, 0xac, 0x4f, 0x97, 0x05, 0x2d, 0x0f, 0xc4, 0x0b, 0xa8, 0x8c, 0x5c, 0x12, + 0xc4, 0x6d, 0xc7, 0xeb, 0x8c, 0xb5, 0x1f, 0x87, 0xf6, 0x30, 0xbf, 0x98, 0x1c, 0x80, 0x03, 0x8d, + 0x9c, 0xf3, 0x41, 0x63, 0xa3, 0xe9, 0xc6, 0x4b, 0xa2, 0x3d, 0xdd, 0x81, 0x19, 0xcf, 0x7a, 0xa9, + 0xa0, 0x0d, 0xb8, 0xfd, 0x45, 0xe0, 0x93, 0x1b, 0x24, 0x36, 0xbf, 0x40, 0xcd, 0xf8, 0x33, 0x31, + 0x1e, 0x65, 0x88, 0x51, 0xea, 0x45, 0xe4, 0x38, 0xe7, 0x11, 0x4f, 0xfb, 0xf5, 0xbf, 0x65, 0x32, + 0x14, 0x99, 0x4a, 0x7d, 0x4f, 0x9c, 0xeb, 0x3b, 0x18, 0xf5, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x4c, + 0x41, 0xfe, 0xb6, 0x89, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/test/grpc_testing/test.proto b/vendor/google.golang.org/grpc/test/grpc_testing/test.proto new file mode 100644 index 00000000000..b5bfe053789 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/grpc_testing/test.proto @@ -0,0 +1,140 @@ +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. +syntax = "proto2"; + +package grpc.testing; + +message Empty {} + +// The type of payload that should be returned. +enum PayloadType { + // Compressable text format. + COMPRESSABLE = 0; + + // Uncompressable binary format. + UNCOMPRESSABLE = 1; + + // Randomly chosen from all other formats defined in this enum. + RANDOM = 2; +} + +// A block of data, to simply increase gRPC message size. +message Payload { + // The type of data in body. + optional PayloadType type = 1; + // Primary contents of payload. + optional bytes body = 2; +} + +// Unary request. +message SimpleRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + optional PayloadType response_type = 1; + + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + optional int32 response_size = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; + + // Whether SimpleResponse should include username. + optional bool fill_username = 4; + + // Whether SimpleResponse should include OAuth scope. 
+ optional bool fill_oauth_scope = 5; +} + +// Unary response, as configured by the request. +message SimpleResponse { + // Payload to increase message size. + optional Payload payload = 1; + + // The user the request came from, for verifying authentication was + // successful when the client expected it. + optional string username = 2; + + // OAuth scope. + optional string oauth_scope = 3; +} + +// Client-streaming request. +message StreamingInputCallRequest { + // Optional input payload sent along with the request. + optional Payload payload = 1; + + // Not expecting any payload from the response. +} + +// Client-streaming response. +message StreamingInputCallResponse { + // Aggregated size of payloads received from the client. + optional int32 aggregated_payload_size = 1; +} + +// Configuration for a particular response. +message ResponseParameters { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + optional int32 size = 1; + + // Desired interval between consecutive responses in the response stream in + // microseconds. + optional int32 interval_us = 2; +} + +// Server-streaming request. +message StreamingOutputCallRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + optional PayloadType response_type = 1; + + // Configuration for each expected response message. + repeated ResponseParameters response_parameters = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; +} + +// Server-streaming response, as configured by the request and parameters. +message StreamingOutputCallResponse { + // Payload to increase response size. + optional Payload payload = 1; +} + +// A simple service to test the various types of RPCs and experiment with +// performance with various types of payload. +service TestService { + // One empty request followed by one empty response. + rpc EmptyCall(Empty) returns (Empty); + + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + rpc StreamingOutputCall(StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + rpc StreamingInputCall(stream StreamingInputCallRequest) + returns (StreamingInputCallResponse); + + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + rpc FullDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ rpc HalfDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); +} diff --git a/vendor/google.golang.org/grpc/test/race_test.go b/vendor/google.golang.org/grpc/test/race_test.go new file mode 100644 index 00000000000..b3a7056c66b --- /dev/null +++ b/vendor/google.golang.org/grpc/test/race_test.go @@ -0,0 +1,39 @@ +// +build race + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc_test + +func init() { + raceMode = true +} diff --git a/vendor/google.golang.org/grpc/test/servertester_test.go b/vendor/google.golang.org/grpc/test/servertester_test.go new file mode 100644 index 00000000000..0225a8576de --- /dev/null +++ b/vendor/google.golang.org/grpc/test/servertester_test.go @@ -0,0 +1,289 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
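A minimal sketch, assuming an already-dialed *grpc.ClientConn, of one unary round trip through the TestServiceClient generated from the test.proto definition above (illustrative only; the import alias and sizes are placeholders):

```go
// Minimal sketch: exercise the generated TestServiceClient for a unary call.
package example

import (
	"golang.org/x/net/context"

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc"
	testpb "google.golang.org/grpc/test/grpc_testing"
)

func unaryRoundTrip(conn *grpc.ClientConn) error {
	client := testpb.NewTestServiceClient(conn)
	req := &testpb.SimpleRequest{
		// proto2 generated fields are pointers, hence Enum()/proto.Int32.
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(314),
	}
	resp, err := client.UnaryCall(context.Background(), req)
	if err != nil {
		return err
	}
	_ = resp.GetPayload() // payload of the requested type and size
	return nil
}
```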
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package grpc_test + +import ( + "bytes" + "errors" + "io" + "strings" + "testing" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +// This is a subset of http2's serverTester type. +// +// serverTester wraps a io.ReadWriter (acting like the underlying +// network connection) and provides utility methods to read and write +// http2 frames. +// +// NOTE(bradfitz): this could eventually be exported somewhere. Others +// have asked for it too. For now I'm still experimenting with the +// API and don't feel like maintaining a stable testing API. + +type serverTester struct { + cc io.ReadWriteCloser // client conn + t testing.TB + fr *http2.Framer + + // writing headers: + headerBuf bytes.Buffer + hpackEnc *hpack.Encoder + + // reading frames: + frc chan http2.Frame + frErrc chan error + readTimer *time.Timer +} + +func newServerTesterFromConn(t testing.TB, cc io.ReadWriteCloser) *serverTester { + st := &serverTester{ + t: t, + cc: cc, + frc: make(chan http2.Frame, 1), + frErrc: make(chan error, 1), + } + st.hpackEnc = hpack.NewEncoder(&st.headerBuf) + st.fr = http2.NewFramer(cc, cc) + st.fr.ReadMetaHeaders = hpack.NewDecoder(4096 /*initialHeaderTableSize*/, nil) + + return st +} + +func (st *serverTester) readFrame() (http2.Frame, error) { + go func() { + fr, err := st.fr.ReadFrame() + if err != nil { + st.frErrc <- err + } else { + st.frc <- fr + } + }() + t := time.NewTimer(2 * time.Second) + defer t.Stop() + select { + case f := <-st.frc: + return f, nil + case err := <-st.frErrc: + return nil, err + case <-t.C: + return nil, errors.New("timeout waiting for frame") + } +} + +// greet initiates the client's HTTP/2 connection into a state where +// frames may be sent. +func (st *serverTester) greet() { + st.writePreface() + st.writeInitialSettings() + st.wantSettings() + st.writeSettingsAck() + for { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + switch f := f.(type) { + case *http2.WindowUpdateFrame: + // grpc's transport/http2_server sends this + // before the settings ack. The Go http2 + // server uses a setting instead. 
+ case *http2.SettingsFrame: + if f.IsAck() { + return + } + st.t.Fatalf("during greet, got non-ACK settings frame") + default: + st.t.Fatalf("during greet, unexpected frame type %T", f) + } + } +} + +func (st *serverTester) writePreface() { + n, err := st.cc.Write([]byte(http2.ClientPreface)) + if err != nil { + st.t.Fatalf("Error writing client preface: %v", err) + } + if n != len(http2.ClientPreface) { + st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(http2.ClientPreface)) + } +} + +func (st *serverTester) writeInitialSettings() { + if err := st.fr.WriteSettings(); err != nil { + st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) + } +} + +func (st *serverTester) writeSettingsAck() { + if err := st.fr.WriteSettingsAck(); err != nil { + st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) + } +} + +func (st *serverTester) wantSettings() *http2.SettingsFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) + } + sf, ok := f.(*http2.SettingsFrame) + if !ok { + st.t.Fatalf("got a %T; want *SettingsFrame", f) + } + return sf +} + +func (st *serverTester) wantSettingsAck() { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + sf, ok := f.(*http2.SettingsFrame) + if !ok { + st.t.Fatalf("Wanting a settings ACK, received a %T", f) + } + if !sf.IsAck() { + st.t.Fatal("Settings Frame didn't have ACK set") + } +} + +// wait for any activity from the server +func (st *serverTester) wantAnyFrame() http2.Frame { + f, err := st.fr.ReadFrame() + if err != nil { + st.t.Fatal(err) + } + return f +} + +func (st *serverTester) encodeHeaderField(k, v string) { + err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } +} + +// encodeHeader encodes headers and returns their HPACK bytes. headers +// must contain an even number of key/value pairs. There may be +// multiple pairs for keys (e.g. "cookie"). The :method, :path, and +// :scheme headers default to GET, / and https. +func (st *serverTester) encodeHeader(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + + st.headerBuf.Reset() + + if len(headers) == 0 { + // Fast path, mostly for benchmarks, so test code doesn't pollute + // profiles when we're looking to improve server allocations. + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + return st.headerBuf.Bytes() + } + + if len(headers) == 2 && headers[0] == ":method" { + // Another fast path for benchmarks. + st.encodeHeaderField(":method", headers[1]) + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + return st.headerBuf.Bytes() + } + + pseudoCount := map[string]int{} + keys := []string{":method", ":path", ":scheme"} + vals := map[string][]string{ + ":method": {"GET"}, + ":path": {"/"}, + ":scheme": {"https"}, + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if _, ok := vals[k]; !ok { + keys = append(keys, k) + } + if strings.HasPrefix(k, ":") { + pseudoCount[k]++ + if pseudoCount[k] == 1 { + vals[k] = []string{v} + } else { + // Allows testing of invalid headers w/ dup pseudo fields. 
+ vals[k] = append(vals[k], v) + } + } else { + vals[k] = append(vals[k], v) + } + } + for _, k := range keys { + for _, v := range vals[k] { + st.encodeHeaderField(k, v) + } + } + return st.headerBuf.Bytes() +} + +func (st *serverTester) writeHeadersGRPC(streamID uint32, path string) { + st.writeHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: st.encodeHeader( + ":method", "POST", + ":path", path, + "content-type", "application/grpc", + "te", "trailers", + ), + EndStream: false, + EndHeaders: true, + }) +} + +func (st *serverTester) writeHeaders(p http2.HeadersFrameParam) { + if err := st.fr.WriteHeaders(p); err != nil { + st.t.Fatalf("Error writing HEADERS: %v", err) + } +} + +func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { + if err := st.fr.WriteData(streamID, endStream, data); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func (st *serverTester) writeRSTStream(streamID uint32, code http2.ErrCode) { + if err := st.fr.WriteRSTStream(streamID, code); err != nil { + st.t.Fatalf("Error writing RST_STREAM: %v", err) + } +} diff --git a/vendor/google.golang.org/grpc/test/testdata/ca.pem b/vendor/google.golang.org/grpc/test/testdata/ca.pem new file mode 100644 index 00000000000..6c8511a73c6 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/testdata/ca.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla +Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 +YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT +BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 ++L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu +g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd +Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau +sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m +oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG +Dfcog5wrJytaQ6UA0wE= +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/test/testdata/server1.key b/vendor/google.golang.org/grpc/test/testdata/server1.key new file mode 100644 index 00000000000..143a5b87658 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/testdata/server1.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD +M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf +3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY +AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm +V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY +tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p +dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q +K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR +81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff +DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd +aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 +ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 +XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe +F98XJ7tIFfJq +-----END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/test/testdata/server1.pem 
b/vendor/google.golang.org/grpc/test/testdata/server1.pem new file mode 100644 index 00000000000..f3d43fcc5be --- /dev/null +++ b/vendor/google.golang.org/grpc/test/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx +MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 +ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco +LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg +zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd +9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw +CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy +em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G +CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 +hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh +y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/testdata/ca.pem b/vendor/google.golang.org/grpc/testdata/ca.pem new file mode 100644 index 00000000000..6c8511a73c6 --- /dev/null +++ b/vendor/google.golang.org/grpc/testdata/ca.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla +Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 +YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT +BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 ++L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu +g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd +Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau +sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m +oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG +Dfcog5wrJytaQ6UA0wE= +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/testdata/server1.key b/vendor/google.golang.org/grpc/testdata/server1.key new file mode 100644 index 00000000000..143a5b87658 --- /dev/null +++ b/vendor/google.golang.org/grpc/testdata/server1.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD +M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf +3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY +AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm +V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY +tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p +dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q +K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR +81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff +DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd +aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 +ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 +XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe +F98XJ7tIFfJq +-----END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/testdata/server1.pem 
b/vendor/google.golang.org/grpc/testdata/server1.pem new file mode 100644 index 00000000000..f3d43fcc5be --- /dev/null +++ b/vendor/google.golang.org/grpc/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx +MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 +ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco +LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg +zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd +9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw +CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy +em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G +CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 +hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh +y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index cde04fbfc9e..f6747e1dfa4 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -101,9 +101,8 @@ type payload struct { func (p payload) String() string { if p.sent { return fmt.Sprintf("sent: %v", p.msg) - } else { - return fmt.Sprintf("recv: %v", p.msg) } + return fmt.Sprintf("recv: %v", p.msg) } type fmtStringer struct { diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go index 6b5201e7a61..7e9bdf33506 100644 --- a/vendor/google.golang.org/grpc/transport/control.go +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -56,42 +56,33 @@ type windowUpdate struct { increment uint32 } -func (windowUpdate) isItem() bool { - return true -} +func (*windowUpdate) item() {} type settings struct { ack bool ss []http2.Setting } -func (settings) isItem() bool { - return true -} +func (*settings) item() {} type resetStream struct { streamID uint32 code http2.ErrCode } -func (resetStream) isItem() bool { - return true -} +func (*resetStream) item() {} type flushIO struct { } -func (flushIO) isItem() bool { - return true -} +func (*flushIO) item() {} type ping struct { - ack bool + ack bool + data [8]byte } -func (ping) isItem() bool { - return true -} +func (*ping) item() {} // quotaPool is a pool which accumulates the quota and sends it to acquire() // when it is available. @@ -171,10 +162,6 @@ func (qb *quotaPool) acquire() <-chan int { type inFlow struct { // The inbound flow control limit for pending data. limit uint32 - // conn points to the shared connection-level inFlow that is shared - // by all streams on that conn. It is nil for the inFlow on the conn - // directly. - conn *inFlow mu sync.Mutex // pendingData is the overall data which have been received but not been @@ -185,75 +172,39 @@ type inFlow struct { pendingUpdate uint32 } -// onData is invoked when some data frame is received. It increments not only its -// own pendingData but also that of the associated connection-level flow. +// onData is invoked when some data frame is received. It updates pendingData. 
func (f *inFlow) onData(n uint32) error { - if n == 0 { - return nil - } f.mu.Lock() defer f.mu.Unlock() - if f.pendingData+f.pendingUpdate+n > f.limit { - return fmt.Errorf("recieved %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit) - } - if f.conn != nil { - if err := f.conn.onData(n); err != nil { - return ConnectionErrorf("%v", err) - } - } f.pendingData += n - return nil -} - -// connOnRead updates the connection level states when the application consumes data. -func (f *inFlow) connOnRead(n uint32) uint32 { - if n == 0 || f.conn != nil { - return 0 + if f.pendingData+f.pendingUpdate > f.limit { + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) } - f.mu.Lock() - defer f.mu.Unlock() - f.pendingData -= n - f.pendingUpdate += n - if f.pendingUpdate >= f.limit/4 { - ret := f.pendingUpdate - f.pendingUpdate = 0 - return ret - } - return 0 + return nil } -// onRead is invoked when the application reads the data. It returns the window updates -// for both stream and connection level. -func (f *inFlow) onRead(n uint32) (swu, cwu uint32) { - if n == 0 { - return - } +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { f.mu.Lock() defer f.mu.Unlock() if f.pendingData == 0 { - // pendingData has been adjusted by restoreConn. - return + return 0 } f.pendingData -= n f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { - swu = f.pendingUpdate + wu := f.pendingUpdate f.pendingUpdate = 0 + return wu } - cwu = f.conn.connOnRead(n) - return + return 0 } -// restoreConn is invoked when a stream is terminated. It removes its stake in -// the connection-level flow and resets its own state. -func (f *inFlow) restoreConn() uint32 { - if f.conn == nil { - return 0 - } +func (f *inFlow) resetPendingData() uint32 { f.mu.Lock() defer f.mu.Unlock() n := f.pendingData f.pendingData = 0 - f.pendingUpdate = 0 - return f.conn.connOnRead(n) + return n } diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go new file mode 100644 index 00000000000..5db139d2394 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -0,0 +1,393 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
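The control.go hunk above drops the linked connection-level accounting from inFlow: each instance now tracks only its own pendingData and pendingUpdate, and onRead returns a window update once at least a quarter of the limit is awaiting acknowledgement. A minimal standalone sketch of that bookkeeping and its intended call pattern (names mirror the vendored type for readability; this is an illustration, not the transport code itself):

```go
// Standalone mirror of the simplified per-stream flow-control accounting.
package flowsketch

import (
	"fmt"
	"sync"
)

type inFlow struct {
	limit         uint32
	mu            sync.Mutex
	pendingData   uint32 // received from the peer, not yet read by the app
	pendingUpdate uint32 // read by the app, not yet acknowledged to the peer
}

// onData records n received bytes and rejects anything beyond the limit.
func (f *inFlow) onData(n uint32) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.pendingData += n
	if f.pendingData+f.pendingUpdate > f.limit {
		return fmt.Errorf("received %d bytes, exceeding the limit %d", f.pendingData+f.pendingUpdate, f.limit)
	}
	return nil
}

// onRead moves n bytes from pending to acked and returns a WINDOW_UPDATE
// increment once at least limit/4 bytes are waiting to be acknowledged.
func (f *inFlow) onRead(n uint32) uint32 {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.pendingData == 0 {
		return 0
	}
	f.pendingData -= n
	f.pendingUpdate += n
	if f.pendingUpdate >= f.limit/4 { // batch acks into quarter-window updates
		wu := f.pendingUpdate
		f.pendingUpdate = 0
		return wu
	}
	return 0
}

// deliver shows the intended call pattern: onData when a DATA frame arrives,
// onRead after the application consumes it, and a WINDOW_UPDATE whenever
// onRead reports a non-zero increment.
func deliver(f *inFlow, frame []byte, sendWindowUpdate func(incr uint32)) error {
	if err := f.onData(uint32(len(frame))); err != nil {
		return err // peer overran the advertised window
	}
	// ... hand the frame to the application here ...
	if wu := f.onRead(uint32(len(frame))); wu > 0 {
		sendWindowUpdate(wu)
	}
	return nil
}
```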
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + if !validContentType(r.Header.Get("Content-Type")) { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + if _, ok := w.(http.CloseNotifier); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := timeoutDecode(v) + if err != nil { + return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + var metakv []string + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) { + continue + } + for _, v := range vv { + if k == "user-agent" { + // user-agent is special. Copying logic of http_util.go. + if i := strings.LastIndex(v, " "); i == -1 { + // There is no application user agent string being set + continue + } else { + v = v[:i] + } + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. 
The channel is closed + // when WriteStatus is called. + writes chan func() +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode)) + if statusDesc != "" { + h.Set("Grpc-Message", grpcMessageEncode(statusDesc)) + } + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to + // send undeclared Trailers after the + // headers have possibly been written. + h.Add(http2.TrailerPrefix+k, v) + } + } + } + }) + close(ht.writes) + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", "application/grpc") + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. 
+ // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(data) + if !opts.Delay { + ht.rw.(http.Flusher).Flush() + } + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + var ctx context.Context + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) + } else { + ctx, cancel = context.WithCancel(context.Background()) + } + + // requestOver is closed when either the request's context is done + // or the status has been written via WriteStatus. + requestOver := make(chan struct{}) + + // clientGone receives a single value if peer is gone, either + // because the underlying connection is dead or because the + // peer sends an http2 RST_STREAM. + clientGone := ht.rw.(http.CloseNotifier).CloseNotify() + go func() { + select { + case <-requestOver: + return + case <-ht.closedCh: + case <-clientGone: + } + cancel() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + windowHandler: func(int) {}, // nothing + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + } + ctx = metadata.NewContext(ctx, ht.headerMD) + ctx = peer.NewContext(ctx, pr) + s.ctx = newContextWithStream(ctx, s) + s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(&recvMsg{data: buf[:n:n]}) + buf = buf[n:] + } + if err != nil { + s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. 
+ req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn, ok := <-ht.writes: + if !ok { + return + } + fn() + case <-ht.closedCh: + return + } + } +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. +// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return StreamError{ + Code: code, + Desc: se.Error(), + } + } + } + return ConnectionError{Desc: err.Error()} +} diff --git a/vendor/google.golang.org/grpc/transport/handler_server_test.go b/vendor/google.golang.org/grpc/transport/handler_server_test.go new file mode 100644 index 00000000000..0711d012e96 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/handler_server_test.go @@ -0,0 +1,389 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
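handler_server.go above implements ServerTransport on top of net/http and, per its header comment, backs *grpc.Server.ServeHTTP. A minimal sketch of the wiring, assuming TLS certificates on disk (net/http only negotiates HTTP/2 over TLS by default; addresses and paths are placeholders):

```go
// Minimal sketch: serving gRPC through the standard net/http HTTP/2 server.
package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// Register services on gs here (e.g. RegisterTestServiceServer(gs, impl)).

	// *grpc.Server satisfies http.Handler via the handler-based transport;
	// net/http negotiates HTTP/2 automatically when serving over TLS.
	srv := &http.Server{
		Addr:    ":8443",
		Handler: gs,
	}
	// Certificate paths are placeholders (the testdata pair would also work).
	log.Fatal(srv.ListenAndServeTLS("server1.pem", "server1.key"))
}
```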
+ * + */ + +package transport + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +func TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { + type testCase struct { + name string + req *http.Request + wantErr string + modrw func(http.ResponseWriter) http.ResponseWriter + check func(*serverHandlerTransport, *testCase) error + } + tests := []testCase{ + { + name: "http/1.1", + req: &http.Request{ + ProtoMajor: 1, + ProtoMinor: 1, + }, + wantErr: "gRPC requires HTTP/2", + }, + { + name: "bad method", + req: &http.Request{ + ProtoMajor: 2, + Method: "GET", + Header: http.Header{}, + RequestURI: "/", + }, + wantErr: "invalid gRPC request method", + }, + { + name: "bad content type", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/foo"}, + }, + RequestURI: "/service/foo.bar", + }, + wantErr: "invalid gRPC request content-type", + }, + { + name: "not flusher", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/grpc"}, + }, + RequestURI: "/service/foo.bar", + }, + modrw: func(w http.ResponseWriter) http.ResponseWriter { + // Return w without its Flush method + type onlyCloseNotifier interface { + http.ResponseWriter + http.CloseNotifier + } + return struct{ onlyCloseNotifier }{w.(onlyCloseNotifier)} + }, + wantErr: "gRPC requires a ResponseWriter supporting http.Flusher", + }, + { + name: "not closenotifier", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/grpc"}, + }, + RequestURI: "/service/foo.bar", + }, + modrw: func(w http.ResponseWriter) http.ResponseWriter { + // Return w without its CloseNotify method + type onlyFlusher interface { + http.ResponseWriter + http.Flusher + } + return struct{ onlyFlusher }{w.(onlyFlusher)} + }, + wantErr: "gRPC requires a ResponseWriter supporting http.CloseNotifier", + }, + { + name: "valid", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/grpc"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + }, + check: func(t *serverHandlerTransport, tt *testCase) error { + if t.req != tt.req { + return fmt.Errorf("t.req = %p; want %p", t.req, tt.req) + } + if t.rw == nil { + return errors.New("t.rw = nil; want non-nil") + } + return nil + }, + }, + { + name: "with timeout", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": []string{"application/grpc"}, + "Grpc-Timeout": {"200m"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + }, + check: func(t *serverHandlerTransport, tt *testCase) error { + if !t.timeoutSet { + return errors.New("timeout not set") + } + if want := 200 * time.Millisecond; t.timeout != want { + return fmt.Errorf("timeout = %v; want %v", t.timeout, want) + } + return nil + }, + }, + { + name: "with bad timeout", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": []string{"application/grpc"}, + "Grpc-Timeout": {"tomorrow"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + }, + wantErr: `stream error: code = 13 desc = "malformed time-out: transport: timeout unit is not recognized: \"tomorrow\""`, + }, + { + 
name: "with metadata", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": []string{"application/grpc"}, + "meta-foo": {"foo-val"}, + "meta-bar": {"bar-val1", "bar-val2"}, + "user-agent": {"x/y a/b"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + }, + check: func(ht *serverHandlerTransport, tt *testCase) error { + want := metadata.MD{ + "meta-bar": {"bar-val1", "bar-val2"}, + "user-agent": {"x/y"}, + "meta-foo": {"foo-val"}, + } + if !reflect.DeepEqual(ht.headerMD, want) { + return fmt.Errorf("metdata = %#v; want %#v", ht.headerMD, want) + } + return nil + }, + }, + } + + for _, tt := range tests { + rw := newTestHandlerResponseWriter() + if tt.modrw != nil { + rw = tt.modrw(rw) + } + got, gotErr := NewServerHandlerTransport(rw, tt.req) + if (gotErr != nil) != (tt.wantErr != "") || (gotErr != nil && gotErr.Error() != tt.wantErr) { + t.Errorf("%s: error = %v; want %q", tt.name, gotErr, tt.wantErr) + continue + } + if gotErr != nil { + continue + } + if tt.check != nil { + if err := tt.check(got.(*serverHandlerTransport), &tt); err != nil { + t.Errorf("%s: %v", tt.name, err) + } + } + } +} + +type testHandlerResponseWriter struct { + *httptest.ResponseRecorder + closeNotify chan bool +} + +func (w testHandlerResponseWriter) CloseNotify() <-chan bool { return w.closeNotify } +func (w testHandlerResponseWriter) Flush() {} + +func newTestHandlerResponseWriter() http.ResponseWriter { + return testHandlerResponseWriter{ + ResponseRecorder: httptest.NewRecorder(), + closeNotify: make(chan bool, 1), + } +} + +type handleStreamTest struct { + t *testing.T + bodyw *io.PipeWriter + req *http.Request + rw testHandlerResponseWriter + ht *serverHandlerTransport +} + +func newHandleStreamTest(t *testing.T) *handleStreamTest { + bodyr, bodyw := io.Pipe() + req := &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/grpc"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + Body: bodyr, + } + rw := newTestHandlerResponseWriter().(testHandlerResponseWriter) + ht, err := NewServerHandlerTransport(rw, req) + if err != nil { + t.Fatal(err) + } + return &handleStreamTest{ + t: t, + bodyw: bodyw, + ht: ht.(*serverHandlerTransport), + rw: rw, + } +} + +func TestHandlerTransport_HandleStreams(t *testing.T) { + st := newHandleStreamTest(t) + handleStream := func(s *Stream) { + if want := "/service/foo.bar"; s.method != want { + t.Errorf("stream method = %q; want %q", s.method, want) + } + st.bodyw.Close() // no body + st.ht.WriteStatus(s, codes.OK, "") + } + st.ht.HandleStreams(func(s *Stream) { go handleStream(s) }) + wantHeader := http.Header{ + "Date": nil, + "Content-Type": {"application/grpc"}, + "Trailer": {"Grpc-Status", "Grpc-Message"}, + "Grpc-Status": {"0"}, + } + if !reflect.DeepEqual(st.rw.HeaderMap, wantHeader) { + t.Errorf("Header+Trailer Map: %#v; want %#v", st.rw.HeaderMap, wantHeader) + } +} + +// Tests that codes.Unimplemented will close the body, per comment in handler_server.go. +func TestHandlerTransport_HandleStreams_Unimplemented(t *testing.T) { + handleStreamCloseBodyTest(t, codes.Unimplemented, "thingy is unimplemented") +} + +// Tests that codes.InvalidArgument will close the body, per comment in handler_server.go. 
+func TestHandlerTransport_HandleStreams_InvalidArgument(t *testing.T) { + handleStreamCloseBodyTest(t, codes.InvalidArgument, "bad arg") +} + +func handleStreamCloseBodyTest(t *testing.T, statusCode codes.Code, msg string) { + st := newHandleStreamTest(t) + handleStream := func(s *Stream) { + st.ht.WriteStatus(s, statusCode, msg) + } + st.ht.HandleStreams(func(s *Stream) { go handleStream(s) }) + wantHeader := http.Header{ + "Date": nil, + "Content-Type": {"application/grpc"}, + "Trailer": {"Grpc-Status", "Grpc-Message"}, + "Grpc-Status": {fmt.Sprint(uint32(statusCode))}, + "Grpc-Message": {grpcMessageEncode(msg)}, + } + if !reflect.DeepEqual(st.rw.HeaderMap, wantHeader) { + t.Errorf("Header+Trailer mismatch.\n got: %#v\nwant: %#v", st.rw.HeaderMap, wantHeader) + } +} + +func TestHandlerTransport_HandleStreams_Timeout(t *testing.T) { + bodyr, bodyw := io.Pipe() + req := &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/grpc"}, + "Grpc-Timeout": {"200m"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + Body: bodyr, + } + rw := newTestHandlerResponseWriter().(testHandlerResponseWriter) + ht, err := NewServerHandlerTransport(rw, req) + if err != nil { + t.Fatal(err) + } + runStream := func(s *Stream) { + defer bodyw.Close() + select { + case <-s.ctx.Done(): + case <-time.After(5 * time.Second): + t.Errorf("timeout waiting for ctx.Done") + return + } + err := s.ctx.Err() + if err != context.DeadlineExceeded { + t.Errorf("ctx.Err = %v; want %v", err, context.DeadlineExceeded) + return + } + ht.WriteStatus(s, codes.DeadlineExceeded, "too slow") + } + ht.HandleStreams(func(s *Stream) { go runStream(s) }) + wantHeader := http.Header{ + "Date": nil, + "Content-Type": {"application/grpc"}, + "Trailer": {"Grpc-Status", "Grpc-Message"}, + "Grpc-Status": {"4"}, + "Grpc-Message": {grpcMessageEncode("too slow")}, + } + if !reflect.DeepEqual(rw.HeaderMap, wantHeader) { + t.Errorf("Header+Trailer Map mismatch.\n got: %#v\nwant: %#v", rw.HeaderMap, wantHeader) + } +} diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index b13fb78c4c1..891af9a5d13 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -35,7 +35,7 @@ package transport import ( "bytes" - "errors" + "fmt" "io" "math" "net" @@ -50,6 +50,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" ) // http2Client implements the ClientTransport interface with HTTP2. @@ -88,7 +89,7 @@ type http2Client struct { // The scheme used: https if TLS is on, http otherwise. scheme string - authCreds []credentials.Credentials + creds []credentials.PerRPCCredentials mu sync.Mutex // guard the following variables state transportState // the state of underlying connection @@ -117,19 +118,12 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e return nil, ConnectionErrorf("transport: %v", connErr) } var authInfo credentials.AuthInfo - for _, c := range opts.AuthOptions { - if ccreds, ok := c.(credentials.TransportAuthenticator); ok { - scheme = "https" - // TODO(zhaoq): Now the first TransportAuthenticator is used if there are - // multiple ones provided. Revisit this if it is not appropriate. 
Probably - // place the ClientTransport construction into a separate function to make - // things clear. - if timeout > 0 { - timeout -= time.Since(startT) - } - conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout) - break + if opts.TransportCredentials != nil { + scheme = "https" + if timeout > 0 { + timeout -= time.Since(startT) } + conn, authInfo, connErr = opts.TransportCredentials.ClientHandshake(addr, conn, timeout) } if connErr != nil { return nil, ConnectionErrorf("transport: %v", connErr) @@ -139,29 +133,6 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e conn.Close() } }() - // Send connection preface to server. - n, err := conn.Write(clientPreface) - if err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - if n != len(clientPreface) { - return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - } - framer := newFramer(conn) - if initialWindowSize != defaultWindowSize { - err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) - } else { - err = framer.writeSettings(true) - } - if err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - } ua := primaryUA if opts.UserAgent != "" { ua = opts.UserAgent + " " + ua @@ -177,7 +148,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e writableChan: make(chan int, 1), shutdownChan: make(chan struct{}), errorChan: make(chan struct{}), - framer: framer, + framer: newFramer(conn), hBuf: &buf, hEnc: hpack.NewEncoder(&buf), controlBuf: newRecvBuffer(), @@ -186,31 +157,57 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e scheme: scheme, state: reachable, activeStreams: make(map[uint32]*Stream), - authCreds: opts.AuthOptions, + creds: opts.PerRPCCredentials, maxStreams: math.MaxInt32, streamSendQuota: defaultWindowSize, } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, ConnectionErrorf("transport: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + if initialWindowSize != defaultWindowSize { + err = t.framer.writeSettings(true, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(initialWindowSize), + }) + } else { + err = t.framer.writeSettings(true) + } + if err != nil { + t.Close() + return nil, ConnectionErrorf("transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { + t.Close() + return nil, ConnectionErrorf("transport: %v", err) + } + } go t.controller() t.writableChan <- 0 - // Start the reader goroutine for incoming message. 
The threading model - // on receiving is that each transport has a dedicated goroutine which - // reads HTTP2 frame from network. Then it dispatches the frame to the - // corresponding stream entity. - go t.reader() return t, nil } func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { - fc := &inFlow{ - limit: initialWindowSize, - conn: t.fc, - } // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &Stream{ id: t.nextID, + done: make(chan struct{}), method: callHdr.Method, + sendCompress: callHdr.SendCompress, buf: newRecvBuffer(), - fc: fc, + fc: &inFlow{limit: initialWindowSize}, sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), headerChan: make(chan struct{}), } @@ -234,16 +231,22 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea var timeout time.Duration if dl, ok := ctx.Deadline(); ok { timeout = dl.Sub(time.Now()) - if timeout <= 0 { - return nil, ContextErr(context.DeadlineExceeded) - } + } + select { + case <-ctx.Done(): + return nil, ContextErr(ctx.Err()) + default: + } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), } // Attach Auth info if there is any. if t.authInfo != nil { - ctx = credentials.NewContext(ctx, t.authInfo) + pr.AuthInfo = t.authInfo } + ctx = peer.NewContext(ctx, pr) authData := make(map[string]string) - for _, c := range t.authCreds { + for _, c := range t.creds { // Construct URI required to get auth request metadata. var port string if pos := strings.LastIndex(t.target, ":"); pos != -1 { @@ -266,6 +269,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } } t.mu.Lock() + if t.activeStreams == nil { + t.mu.Unlock() + return nil, ErrConnClosing + } if t.state != reachable { t.mu.Unlock() return nil, ErrConnClosing @@ -273,7 +280,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea checkStreamsQuota := t.streamsQuota != nil t.mu.Unlock() if checkStreamsQuota { - sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire()) + sq, err := wait(ctx, nil, t.shutdownChan, t.streamsQuota.acquire()) if err != nil { return nil, err } @@ -282,8 +289,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.streamsQuota.add(sq - 1) } } - if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil { - // t.streamsQuota will be updated when t.CloseStream is invoked. + if _, err := wait(ctx, nil, t.shutdownChan, t.writableChan); err != nil { + // Return the quota back now because there is no stream returned to the caller. + if _, ok := err.(StreamError); ok && checkStreamsQuota { + t.streamsQuota.add(1) + } return nil, err } t.mu.Lock() @@ -317,10 +327,15 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.SendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + } if timeout > 0 { t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)}) } for k, v := range authData { + // Capital header names are illegal in HTTP/2. 
+ k = strings.ToLower(k) t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) } var ( @@ -330,6 +345,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if md, ok := metadata.FromContext(ctx); ok { hasMD = true for k, v := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } for _, entry := range v { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) } @@ -344,6 +363,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } else { endHeaders = true } + var flush bool + if endHeaders && (hasMD || callHdr.Flush) { + flush = true + } if first { // Sends a HeadersFrame to server to start a new stream. p := http2.HeadersFrameParam{ @@ -355,11 +378,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea // Do a force flush for the buffered frames iff it is the last headers frame // and there is header metadata to be sent. Otherwise, there is flushing until // the corresponding data frame is written. - err = t.framer.writeHeaders(hasMD && endHeaders, p) + err = t.framer.writeHeaders(flush, p) first = false } else { // Sends Continuation frames for the leftover headers. - err = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size)) + err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size)) } if err != nil { t.notifyError(err) @@ -375,22 +398,29 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea func (t *http2Client) CloseStream(s *Stream, err error) { var updateStreams bool t.mu.Lock() + if t.activeStreams == nil { + t.mu.Unlock() + return + } if t.streamsQuota != nil { updateStreams = true } + if t.state == draining && len(t.activeStreams) == 1 { + // The transport is draining and s is the last live stream on t. + t.mu.Unlock() + t.Close() + return + } delete(t.activeStreams, s.id) t.mu.Unlock() if updateStreams { t.streamsQuota.add(1) } - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), the caller needs - // to call cancel on the stream to interrupt the blocking on - // other goroutines. - s.cancel() s.mu.Lock() - if q := s.fc.restoreConn(); q > 0 { - t.controlBuf.put(&windowUpdate{0, q}) + if q := s.fc.resetPendingData(); q > 0 { + if n := t.fc.onRead(q); n > 0 { + t.controlBuf.put(&windowUpdate{0, n}) + } } if s.state == streamDone { s.mu.Unlock() @@ -412,9 +442,12 @@ func (t *http2Client) CloseStream(s *Stream, err error) { // accessed any more. func (t *http2Client) Close() (err error) { t.mu.Lock() + if t.state == reachable { + close(t.errorChan) + } if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return } t.state = closing t.mu.Unlock() @@ -437,6 +470,25 @@ func (t *http2Client) Close() (err error) { return } +func (t *http2Client) GracefulClose() error { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return nil + } + if t.state == draining { + t.mu.Unlock() + return nil + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + return t.Close() + } + return nil +} + // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. // TODO(zhaoq): opts.Delay is ignored in this implementation. 
Support it later @@ -449,15 +501,15 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { size := http2MaxFrameLen s.sendQuotaPool.add(0) // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) + sq, err := wait(s.ctx, s.done, t.shutdownChan, s.sendQuotaPool.acquire()) if err != nil { return err } t.sendQuotaPool.add(0) // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) + tq, err := wait(s.ctx, s.done, t.shutdownChan, t.sendQuotaPool.acquire()) if err != nil { - if _, ok := err.(StreamError); ok { + if _, ok := err.(StreamError); ok || err == io.EOF { t.sendQuotaPool.cancel() } return err @@ -489,7 +541,11 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { // Indicate there is a writer who is about to write a data frame. t.framer.adjustNumWriters(1) // Got some quota. Try to acquire writing privilege on the transport. - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, s.done, t.shutdownChan, t.writableChan); err != nil { + if _, ok := err.(StreamError); ok || err == io.EOF { + // Return the connection quota back. + t.sendQuotaPool.add(len(p)) + } if t.framer.adjustNumWriters(-1) == 0 { // This writer is the last one in this batch and has the // responsibility to flush the buffered frames. It queues @@ -499,6 +555,16 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { } return err } + select { + case <-s.ctx.Done(): + t.sendQuotaPool.add(len(p)) + if t.framer.adjustNumWriters(-1) == 0 { + t.controlBuf.put(&flushIO{}) + } + t.writableChan <- 0 + return ContextErr(s.ctx.Err()) + default: + } if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { // Do a force flush iff this is last frame for the entire gRPC message // and the caller is the only writer at this moment. @@ -524,11 +590,7 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { } s.mu.Lock() if s.state != streamDone { - if s.state == streamReadDone { - s.state = streamDone - } else { - s.state = streamWriteDone - } + s.state = streamWriteDone } s.mu.Unlock() return nil @@ -537,55 +599,62 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { t.mu.Lock() defer t.mu.Unlock() - if t.activeStreams == nil { - // The transport is closing. - return nil, false - } - if s, ok := t.activeStreams[f.Header().StreamID]; ok { - return s, true - } - return nil, false + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok } // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. 
func (t *http2Client) updateWindow(s *Stream, n uint32) { - swu, cwu := s.fc.onRead(n) - if swu > 0 { - t.controlBuf.put(&windowUpdate{s.id, swu}) + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := t.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) } - if cwu > 0 { - t.controlBuf.put(&windowUpdate{0, cwu}) + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) } } func (t *http2Client) handleData(f *http2.DataFrame) { + size := len(f.Data()) + if err := t.fc.onData(uint32(size)); err != nil { + t.notifyError(ConnectionErrorf("%v", err)) + return + } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } - size := len(f.Data()) if size > 0 { - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - t.notifyError(err) - return - } - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + // The stream has been closed. Release the corresponding quota. + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) } + return + } + if err := s.fc.onData(uint32(size)); err != nil { s.state = streamDone s.statusCode = codes.Internal s.statusDesc = err.Error() + close(s.done) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } + s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? @@ -597,13 +666,14 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // the read direction is closed, and set the status appropriately. if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { s.mu.Lock() - if s.state == streamWriteDone { - s.state = streamDone - } else { - s.state = streamReadDone + if s.state == streamDone { + s.mu.Unlock() + return } + s.state = streamDone s.statusCode = codes.Internal s.statusDesc = "server closed the stream without sending trailers" + close(s.done) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -624,10 +694,13 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { close(s.headerChan) s.headerDone = true } - s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)] + s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) + s.statusCode = codes.Unknown } + s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode) + close(s.done) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -646,7 +719,9 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame) { } func (t *http2Client) handlePing(f *http2.PingFrame) { - t.controlBuf.put(&ping{true}) + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { @@ -665,52 +740,59 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { } } -// operateHeader takes action on the decoded headers. It returns the current -// stream if there are remaining headers on the wire (in the following -// Continuation frame). 
-func (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) { - defer func() { - if pendingStream == nil { - hDec.state = decodeState{} - } - }() - endHeaders, err := hDec.decodeClientHTTP2Headers(frame) - if s == nil { - // s has been closed. - return nil +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s, ok := t.getStream(frame) + if !ok { + return } - if err != nil { - s.write(recvMsg{err: err}) - // Something wrong. Stops reading even when there is remaining. - return nil + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) } - if !endHeaders { - return s + if state.err != nil { + s.write(recvMsg{err: state.err}) + // Something wrong. Stops reading even when there is remaining. + return } + endStream := frame.StreamEnded() + s.mu.Lock() + if !endStream { + s.recvCompress = state.encoding + } if !s.headerDone { - if !endStream && len(hDec.state.mdata) > 0 { - s.header = hDec.state.mdata + if !endStream && len(state.mdata) > 0 { + s.header = state.mdata } close(s.headerChan) s.headerDone = true } if !endStream || s.state == streamDone { s.mu.Unlock() - return nil + return } - if len(hDec.state.mdata) > 0 { - s.trailer = hDec.state.mdata + if len(state.mdata) > 0 { + s.trailer = state.mdata } + s.statusCode = state.statusCode + s.statusDesc = state.statusDesc + close(s.done) s.state = streamDone - s.statusCode = hDec.state.statusCode - s.statusDesc = hDec.state.statusDesc s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - return nil +} + +func handleMalformedHTTP2(s *Stream, err error) { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: err}) } // reader runs as a separate goroutine in charge of reading data from network @@ -733,25 +815,31 @@ func (t *http2Client) reader() { } t.handleSettings(sf) - hDec := newHPACKDecoder() - var curStream *Stream // loop to keep reading incoming messages on this transport. for { frame, err := t.framer.readFrame() if err != nil { - t.notifyError(err) - return + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + handleMalformedHTTP2(s, StreamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) + } + continue + } else { + // Transport error. + t.notifyError(err) + return + } } switch frame := frame.(type) { - case *http2.HeadersFrame: - // operateHeaders has to be invoked regardless the value of curStream - // because the HPACK decoder needs to be updated using the received - // headers. - curStream, _ = t.getStream(frame) - endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) - curStream = t.operateHeaders(hDec, curStream, frame, endStream) - case *http2.ContinuationFrame: - curStream = t.operateHeaders(hDec, curStream, frame, false) + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: @@ -827,9 +915,7 @@ func (t *http2Client) controller() { case *flushIO: t.framer.flushWrite() case *ping: - // TODO(zhaoq): Ack with all-0 data now. will change to some - // meaningful content when this is actually in use. 
- t.framer.writePing(true, i.ack, [8]byte{}) + t.framer.writePing(true, i.ack, i.data) default: grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) } diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index f3488f83dca..168326aadd0 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -49,6 +49,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" ) // ErrIllegalHeaderWrite indicates that setting header is illegal because of @@ -61,8 +62,8 @@ type http2Server struct { maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by sending a value on writableChan - // and releases it by receiving from writableChan. + // A writer acquires the write lock by receiving a value on writableChan + // and releases it by sending on writableChan. writableChan chan int // shutdownChan is closed when Close is called. // Blocking operations should select on shutdownChan to avoid @@ -99,10 +100,15 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI if maxStreams == 0 { maxStreams = math.MaxUint32 } else { - settings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams}) + settings = append(settings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) } if initialWindowSize != defaultWindowSize { - settings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) + settings = append(settings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(initialWindowSize)}) } if err := framer.writeSettings(true, settings...); err != nil { return nil, ConnectionErrorf("transport: %v", err) @@ -135,75 +141,77 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI return t, nil } -// operateHeader takes action on the decoded headers. It returns the current -// stream if there are remaining headers on the wire (in the following -// Continuation frame). -func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream)) (pendingStream *Stream) { - defer func() { - if pendingStream == nil { - hDec.state = decodeState{} - } - }() - endHeaders, err := hDec.decodeServerHTTP2Headers(frame) - if s == nil { - // s has been closed. - return nil +// operateHeader takes action on the decoded headers. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) { + buf := newRecvBuffer() + s := &Stream{ + id: frame.Header().StreamID, + st: t, + buf: buf, + fc: &inFlow{limit: initialWindowSize}, } - if err != nil { - grpclog.Printf("transport: http2Server.operateHeader found %v", err) + + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) + } + if err := state.err; err != nil { if se, ok := err.(StreamError); ok { t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) } - return nil + return } - if endStream { + + if frame.StreamEnded() { // s is just created by the caller. No lock needed. 
s.state = streamReadDone } - if !endHeaders { - return s - } - t.mu.Lock() - if t.state != reachable { - t.mu.Unlock() - return nil - } - if uint32(len(t.activeStreams)) >= t.maxStreams { - t.mu.Unlock() - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) - return nil - } - s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) - t.activeStreams[s.id] = s - t.mu.Unlock() - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) - } - if hDec.state.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout) + s.recvCompress = state.encoding + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout) } else { s.ctx, s.cancel = context.WithCancel(context.TODO()) } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), + } // Attach Auth info if there is any. if t.authInfo != nil { - s.ctx = credentials.NewContext(s.ctx, t.authInfo) + pr.AuthInfo = t.authInfo } + s.ctx = peer.NewContext(s.ctx, pr) // Cache the current stream to the context so that the server application // can find out. Required when the server wants to send some metadata // back to the client (unary call only). s.ctx = newContextWithStream(s.ctx, s) // Attach the received metadata to the context. - if len(hDec.state.mdata) > 0 { - s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata) + if len(state.mdata) > 0 { + s.ctx = metadata.NewContext(s.ctx, state.mdata) } s.dec = &recvBufferReader{ ctx: s.ctx, recv: s.buf, } - s.method = hDec.state.method + s.recvCompress = state.encoding + s.method = state.method + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) + return + } + s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) + t.activeStreams[s.id] = s + t.mu.Unlock() + s.windowHandler = func(n int) { + t.updateWindow(s, uint32(n)) + } handle(s) - return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -236,16 +244,24 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { } t.handleSettings(sf) - hDec := newHPACKDecoder() - var curStream *Stream for { frame, err := t.framer.readFrame() if err != nil { + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s) + } + t.controlBuf.put(&resetStream{se.StreamID, se.Code}) + continue + } t.Close() return } switch frame := frame.(type) { - case *http2.HeadersFrame: + case *http2.MetaHeadersFrame: id := frame.Header().StreamID if id%2 != 1 || id <= t.maxStreamID { // illegal gRPC stream id. @@ -254,21 +270,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { break } t.maxStreamID = id - buf := newRecvBuffer() - fc := &inFlow{ - limit: initialWindowSize, - conn: t.fc, - } - curStream = &Stream{ - id: frame.Header().StreamID, - st: t, - buf: buf, - fc: fc, - } - endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) - curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle) - case *http2.ContinuationFrame: - curStream = t.operateHeaders(hDec, curStream, frame, false, handle) + t.operateHeaders(frame, handle) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: @@ -306,33 +308,51 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. 
func (t *http2Server) updateWindow(s *Stream, n uint32) { - swu, cwu := s.fc.onRead(n) - if swu > 0 { - t.controlBuf.put(&windowUpdate{s.id, swu}) + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return } - if cwu > 0 { - t.controlBuf.put(&windowUpdate{0, cwu}) + if w := t.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) } } func (t *http2Server) handleData(f *http2.DataFrame) { + size := len(f.Data()) + if err := t.fc.onData(uint32(size)); err != nil { + grpclog.Printf("transport: http2Server %v", err) + t.Close() + return + } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } - size := len(f.Data()) if size > 0 { - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - grpclog.Printf("transport: http2Server %v", err) - t.Close() - return + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + // The stream has been closed. Release the corresponding quota. + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) } + return + } + if err := s.fc.onData(uint32(size)); err != nil { + s.mu.Unlock() t.closeStream(s) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } + s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? @@ -344,11 +364,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // Received the end of stream from the client. s.mu.Lock() if s.state != streamDone { - if s.state == streamWriteDone { - s.state = streamDone - } else { - s.state = streamReadDone - } + s.state = streamReadDone } s.mu.Unlock() s.write(recvMsg{err: io.EOF}) @@ -377,7 +393,9 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } func (t *http2Server) handlePing(f *http2.PingFrame) { - t.controlBuf.put(&ping{true}) + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) } func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -433,13 +451,20 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } s.headerOk = true s.mu.Unlock() - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if s.sendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } for k, v := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
+ continue + } for _, entry := range v { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) } @@ -466,7 +491,7 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s headersSent = true } s.mu.Unlock() - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() @@ -479,9 +504,13 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s Name: "grpc-status", Value: strconv.Itoa(int(statusCode)), }) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc}) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: grpcMessageEncode(statusDesc)}) // Attach the trailer metadata. for k, v := range s.trailer { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } for _, entry := range v { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) } @@ -501,18 +530,25 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { // TODO(zhaoq): Support multi-writers for a single stream. var writeHeaderFrame bool s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return StreamErrorf(codes.Unknown, "the stream has been done") + } if !s.headerOk { writeHeaderFrame = true s.headerOk = true } s.mu.Unlock() if writeHeaderFrame { - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if s.sendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } p := http2.HeadersFrameParam{ StreamID: s.id, BlockFragment: t.hBuf.Bytes(), @@ -532,13 +568,13 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { size := http2MaxFrameLen s.sendQuotaPool.add(0) // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) + sq, err := wait(s.ctx, nil, t.shutdownChan, s.sendQuotaPool.acquire()) if err != nil { return err } t.sendQuotaPool.add(0) // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) + tq, err := wait(s.ctx, nil, t.shutdownChan, t.sendQuotaPool.acquire()) if err != nil { if _, ok := err.(StreamError); ok { t.sendQuotaPool.cancel() @@ -564,7 +600,11 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { t.framer.adjustNumWriters(1) // Got some quota. Try to acquire writing privilege on the // transport. - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, t.shutdownChan, t.writableChan); err != nil { + if _, ok := err.(StreamError); ok { + // Return the connection quota back. + t.sendQuotaPool.add(ps) + } if t.framer.adjustNumWriters(-1) == 0 { // This writer is the last one in this batch and has the // responsibility to flush the buffered frames. 
It queues @@ -574,6 +614,16 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { } return err } + select { + case <-s.ctx.Done(): + t.sendQuotaPool.add(ps) + if t.framer.adjustNumWriters(-1) == 0 { + t.controlBuf.put(&flushIO{}) + } + t.writableChan <- 0 + return ContextErr(s.ctx.Err()) + default: + } var forceFlush bool if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { forceFlush = true @@ -628,9 +678,7 @@ func (t *http2Server) controller() { case *flushIO: t.framer.flushWrite() case *ping: - // TODO(zhaoq): Ack with all-0 data now. will change to some - // meaningful content when this is actually in use. - t.framer.writePing(true, i.ack, [8]byte{}) + t.framer.writePing(true, i.ack, i.data) default: grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) } @@ -660,9 +708,9 @@ func (t *http2Server) Close() (err error) { t.mu.Unlock() close(t.shutdownChan) err = t.conn.Close() - // Notify all active streams. + // Cancel all active streams. for _, s := range streams { - s.write(recvMsg{err: ErrConnClosing}) + s.cancel() } return } @@ -673,21 +721,22 @@ func (t *http2Server) closeStream(s *Stream) { t.mu.Lock() delete(t.activeStreams, s.id) t.mu.Unlock() - if q := s.fc.restoreConn(); q > 0 { - t.controlBuf.put(&windowUpdate{0, q}) - } + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() s.mu.Lock() + if q := s.fc.resetPendingData(); q > 0 { + if w := t.fc.onRead(q); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + } if s.state == streamDone { s.mu.Unlock() return } s.state = streamDone s.mu.Unlock() - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), the caller needs - // to call cancel on the stream to interrupt the blocking on - // other goroutines. - s.cancel() } func (t *http2Server) RemoteAddr() net.Addr { diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index fec4e4755d9..be4925e8128 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -62,13 +62,14 @@ const ( ) var ( - clientPreface = []byte(http2.ClientPreface) - http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ http2.ErrCodeNo: codes.Internal, http2.ErrCodeProtocol: codes.Internal, http2.ErrCodeInternal: codes.Internal, http2.ErrCodeFlowControl: codes.ResourceExhausted, http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, http2.ErrCodeFrameSize: codes.Internal, http2.ErrCodeRefusedStream: codes.Unavailable, http2.ErrCodeCancel: codes.Canceled, @@ -76,6 +77,7 @@ var ( http2.ErrCodeConnect: codes.Internal, http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.FailedPrecondition, } statusCodeConvTab = map[codes.Code]http2.ErrCode{ codes.Internal: http2.ErrCodeInternal, @@ -89,6 +91,9 @@ var ( // Records the states during HPACK decoding. Must be reset once the // decoding of the entire headers are finished. 
type decodeState struct { + err error // first error encountered decoding + + encoding string // statusCode caches the stream status received from the trailer // the server sent. Client side only. statusCode codes.Code @@ -101,25 +106,11 @@ type decodeState struct { mdata map[string][]string } -// An hpackDecoder decodes HTTP2 headers which may span multiple frames. -type hpackDecoder struct { - h *hpack.Decoder - state decodeState - err error // The err when decoding -} - -// A headerFrame is either a http2.HeaderFrame or http2.ContinuationFrame. -type headerFrame interface { - Header() http2.FrameHeader - HeaderBlockFragment() []byte - HeadersEnded() bool -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. func isReservedHeader(hdr string) bool { - if hdr[0] == ':' { + if hdr != "" && hdr[0] == ':' { return true } switch hdr { @@ -136,98 +127,86 @@ func isReservedHeader(hdr string) bool { } } -func newHPACKDecoder() *hpackDecoder { - d := &hpackDecoder{} - d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) { - switch f.Name { - case "content-type": - if !strings.Contains(f.Value, "application/grpc") { - d.err = StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected header") - return - } - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.state.statusCode = codes.Code(code) - case "grpc-message": - d.state.statusDesc = f.Value - case "grpc-timeout": - d.state.timeoutSet = true - var err error - d.state.timeout, err = timeoutDecode(f.Value) - if err != nil { - d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err) - return - } - case ":path": - d.state.method = f.Value - default: - if !isReservedHeader(f.Name) { - if f.Name == "user-agent" { - i := strings.LastIndex(f.Value, " ") - if i == -1 { - // There is no application user agent string being set. - return - } - // Extract the application user agent string. - f.Value = f.Value[:i] - } - if d.state.mdata == nil { - d.state.mdata = make(map[string][]string) - } - k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) - if err != nil { - grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) - return - } - d.state.mdata[k] = append(d.state.mdata[k], v) - } - } - }) - return d +// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders +// that should be propagated into metadata visible to users. +func isWhitelistedPseudoHeader(hdr string) bool { + switch hdr { + case ":authority": + return true + default: + return false + } } -func (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { - d.err = nil - _, err = d.h.Write(frame.HeaderBlockFragment()) - if err != nil { - err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) +func (d *decodeState) setErr(err error) { + if d.err == nil { + d.err = err } +} - if frame.HeadersEnded() { - if closeErr := d.h.Close(); closeErr != nil && err == nil { - err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) - } - endHeaders = true +func validContentType(t string) bool { + e := "application/grpc" + if !strings.HasPrefix(t, e) { + return false } - - if err == nil && d.err != nil { - err = d.err + // Support variations on the content-type + // (e.g. 
"application/grpc+blah", "application/grpc;blah"). + if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' { + return false } - return + return true } -func (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { - d.err = nil - _, err = d.h.Write(frame.HeaderBlockFragment()) - if err != nil { - err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) - } - - if frame.HeadersEnded() { - if closeErr := d.h.Close(); closeErr != nil && err == nil { - err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) +func (d *decodeState) processHeaderField(f hpack.HeaderField) { + switch f.Name { + case "content-type": + if !validContentType(f.Value) { + d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) + return + } + case "grpc-encoding": + d.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) + return + } + d.statusCode = codes.Code(code) + case "grpc-message": + d.statusDesc = grpcMessageDecode(f.Value) + case "grpc-timeout": + d.timeoutSet = true + var err error + d.timeout, err = timeoutDecode(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) + return + } + case ":path": + d.method = f.Value + default: + if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) { + if f.Name == "user-agent" { + i := strings.LastIndex(f.Value, " ") + if i == -1 { + // There is no application user agent string being set. + return + } + // Extract the application user agent string. + f.Value = f.Value[:i] + } + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) + if err != nil { + grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) + return + } + d.mdata[k] = append(d.mdata[k], v) } - endHeaders = true - } - - if err == nil && d.err != nil { - err = d.err } - return } type timeoutUnit uint8 @@ -318,10 +297,11 @@ type framer struct { func newFramer(conn net.Conn) *framer { f := &framer{ - reader: conn, + reader: bufio.NewReaderSize(conn, http2IOBufSize), writer: bufio.NewWriterSize(conn, http2IOBufSize), } f.fr = http2.NewFramer(f.writer, f.reader) + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } @@ -449,3 +429,7 @@ func (f *framer) flushWrite() error { func (f *framer) readFrame() (http2.Frame, error) { return f.fr.ReadFrame() } + +func (f *framer) errorDetail() error { + return f.fr.ErrorDetail() +} diff --git a/vendor/google.golang.org/grpc/transport/http_util_test.go b/vendor/google.golang.org/grpc/transport/http_util_test.go new file mode 100644 index 00000000000..279acbc525e --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/http_util_test.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "fmt" + "testing" + "time" +) + +func TestTimeoutEncode(t *testing.T) { + for _, test := range []struct { + in string + out string + }{ + {"12345678ns", "12345678n"}, + {"123456789ns", "123457u"}, + {"12345678us", "12345678u"}, + {"123456789us", "123457m"}, + {"12345678ms", "12345678m"}, + {"123456789ms", "123457S"}, + {"12345678s", "12345678S"}, + {"123456789s", "2057614M"}, + {"12345678m", "12345678M"}, + {"123456789m", "2057614H"}, + } { + d, err := time.ParseDuration(test.in) + if err != nil { + t.Fatalf("failed to parse duration string %s: %v", test.in, err) + } + out := timeoutEncode(d) + if out != test.out { + t.Fatalf("timeoutEncode(%s) = %s, want %s", test.in, out, test.out) + } + } +} + +func TestTimeoutDecode(t *testing.T) { + for _, test := range []struct { + // input + s string + // output + d time.Duration + err error + }{ + {"1234S", time.Second * 1234, nil}, + {"1234x", 0, fmt.Errorf("transport: timeout unit is not recognized: %q", "1234x")}, + {"1", 0, fmt.Errorf("transport: timeout string is too short: %q", "1")}, + {"", 0, fmt.Errorf("transport: timeout string is too short: %q", "")}, + } { + d, err := timeoutDecode(test.s) + if d != test.d || fmt.Sprint(err) != fmt.Sprint(test.err) { + t.Fatalf("timeoutDecode(%q) = %d, %v, want %d, %v", test.s, int64(d), err, int64(test.d), test.err) + } + } +} + +func TestValidContentType(t *testing.T) { + tests := []struct { + h string + want bool + }{ + {"application/grpc", true}, + {"application/grpc+", true}, + {"application/grpc+blah", true}, + {"application/grpc;", true}, + {"application/grpc;blah", true}, + {"application/grpcd", false}, + {"application/grpd", false}, + {"application/grp", false}, + } + for _, tt := range tests { + got := validContentType(tt.h) + if got != tt.want { + t.Errorf("validContentType(%q) = %v; want %v", tt.h, got, tt.want) + } + } +} diff --git a/vendor/google.golang.org/grpc/transport/testdata/ca.pem b/vendor/google.golang.org/grpc/transport/testdata/ca.pem new file mode 100644 index 00000000000..6c8511a73c6 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/testdata/ca.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX 
+aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla +Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 +YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT +BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 ++L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu +g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd +Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau +sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m +oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG +Dfcog5wrJytaQ6UA0wE= +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/transport/testdata/server1.key b/vendor/google.golang.org/grpc/transport/testdata/server1.key new file mode 100644 index 00000000000..143a5b87658 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/testdata/server1.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD +M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf +3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY +AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm +V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY +tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p +dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q +K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR +81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff +DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd +aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 +ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 +XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe +F98XJ7tIFfJq +-----END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/transport/testdata/server1.pem b/vendor/google.golang.org/grpc/transport/testdata/server1.pem new file mode 100644 index 00000000000..f3d43fcc5be --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx +MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 +ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco +LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg +zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd +9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw +CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy +em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G +CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 +hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh +y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index e1e7f5761af..42472fd28e7 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -43,6 +43,7 @@ import ( "fmt" "io" "net" + "strconv" "sync" "time" @@ -63,13 +64,11 
@@ type recvMsg struct { err error } -func (recvMsg) isItem() bool { - return true -} +func (*recvMsg) item() {} // All items in an out of a recvBuffer should be the same type. type item interface { - isItem() bool + item() } // recvBuffer is an unbounded channel of item. @@ -89,12 +88,14 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r item) { b.mu.Lock() defer b.mu.Unlock() - b.backlog = append(b.backlog, r) - select { - case b.c <- b.backlog[0]: - b.backlog = b.backlog[1:] - default: + if len(b.backlog) == 0 { + select { + case b.c <- r: + return + default: + } } + b.backlog = append(b.backlog, r) } func (b *recvBuffer) load() { @@ -158,7 +159,7 @@ const ( streamActive streamState = iota streamWriteDone // EndStream sent streamReadDone // EndStream received - streamDone // sendDone and recvDone or RSTStreamFrame is sent or received. + streamDone // the entire stream is finished. ) // Stream represents an RPC in the transport layer. @@ -169,12 +170,16 @@ type Stream struct { // ctx is the associated context of the stream. ctx context.Context cancel context.CancelFunc + // done is closed when the final status arrives. + done chan struct{} // method records the associated RPC method of the stream. - method string - buf *recvBuffer - dec io.Reader - fc *inFlow - recvQuota uint32 + method string + recvCompress string + sendCompress string + buf *recvBuffer + dec io.Reader + fc *inFlow + recvQuota uint32 // The accumulated inbound quota pending for window update. updateQuota uint32 // The handler to control the window update procedure for both this @@ -201,6 +206,21 @@ type Stream struct { statusDesc string } +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str +} + +func (s *Stream) Done() <-chan struct{} { + return s.done +} + // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no // header metadata or iii) the stream is cancelled/expired. @@ -286,20 +306,18 @@ func (s *Stream) Read(p []byte) (n int, err error) { return } -type key int - // The key to save transport.Stream in the context. -const streamKey = key(0) +type streamKey struct{} // newContextWithStream creates a new context from ctx and attaches stream // to it. func newContextWithStream(ctx context.Context, stream *Stream) context.Context { - return context.WithValue(ctx, streamKey, stream) + return context.WithValue(ctx, streamKey{}, stream) } // StreamFromContext returns the stream saved in ctx. func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { - s, ok = ctx.Value(streamKey).(*Stream) + s, ok = ctx.Value(streamKey{}).(*Stream) return } @@ -310,6 +328,7 @@ const ( reachable transportState = iota unreachable closing + draining ) // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -324,9 +343,11 @@ type ConnectOptions struct { UserAgent string // Dialer specifies how to dial a network address. Dialer func(string, time.Duration) (net.Conn, error) - // AuthOptions stores the credentials required to setup a client connection and/or issue RPCs. - AuthOptions []credentials.Credentials - // Timeout specifies the timeout for dialing a client connection. 
+ // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. + PerRPCCredentials []credentials.PerRPCCredentials + // TransportCredentials stores the Authenticator required to setup a client connection. + TransportCredentials credentials.TransportCredentials + // Timeout specifies the timeout for dialing a ClientTransport. Timeout time.Duration } @@ -339,20 +360,40 @@ func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, e // Options provides additional hints and information for message // transmission. type Options struct { - // Indicate whether it is the last piece for this stream. + // Last indicates whether this write is the last piece for + // this stream. Last bool - // The hint to transport impl whether the data could be buffered for - // batching write. Transport impl can feel free to ignore it. + + // Delay is a hint to the transport implementation for whether + // the data could be buffered for a batching write. The + // Transport implementation may ignore the hint. Delay bool } // CallHdr carries the information of a particular RPC. type CallHdr struct { - Host string // peer host - Method string // the operation to perform on the specified host + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // RecvCompress specifies the compression algorithm applied on + // inbound messages. + RecvCompress string + + // SendCompress specifies the compression algorithm applied on + // outbound messages. + SendCompress string + + // Flush indicates whether a new stream command should be sent + // to the peer without waiting for the first data. This is + // only a hint. The transport may modify the flush decision + // for performance purposes. + Flush bool } -// ClientTransport is the common interface for all gRPC client side transport +// ClientTransport is the common interface for all gRPC client-side transport // implementations. type ClientTransport interface { // Close tears down this transport. Once it returns, the transport @@ -360,6 +401,10 @@ type ClientTransport interface { // is called only once. Close() error + // GracefulClose starts to tear down the transport. It stops accepting + // new RPCs and waits for the completion of the pending RPCs. + GracefulClose() error + // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. Write(s *Stream, data []byte, opts *Options) error @@ -381,21 +426,33 @@ type ClientTransport interface { Error() <-chan struct{} } -// ServerTransport is the common interface for all gRPC server side transport +// ServerTransport is the common interface for all gRPC server-side transport // implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. type ServerTransport interface { - // WriteStatus sends the status of a stream to the client. - WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error - // Write sends the data for the given stream. - Write(s *Stream, data []byte, opts *Options) error - // WriteHeader sends the header metedata for the given stream. - WriteHeader(s *Stream, md metadata.MD) error // HandleStreams receives incoming streams using the given handler. HandleStreams(func(*Stream)) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams.
+ WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. + // WriteStatus is the final call made on a stream and always + // occurs. + WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error + // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. Close() error + // RemoteAddr returns the remote network address. RemoteAddr() net.Addr } @@ -425,7 +482,7 @@ func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: desc = %q", e.Desc) } -// Define some common ConnectionErrors. +// ErrConnClosing indicates that the transport is closing. var ErrConnClosing = ConnectionError{Desc: "transport is closing"} // StreamError is an error that only affects one stream within a connection. @@ -451,15 +508,96 @@ func ContextErr(err error) StreamError { // wait blocks until it can receive from ctx.Done, closing, or proceed. // If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err. +// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise +// it returns the StreamError for ctx.Err. // If it receives from closing, it returns 0, ErrConnClosing. // If it receives from proceed, it returns the received integer, nil. -func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) { +func wait(ctx context.Context, done, closing <-chan struct{}, proceed <-chan int) (int, error) { select { case <-ctx.Done(): return 0, ContextErr(ctx.Err()) + case <-done: + // User cancellation has precedence. + select { + case <-ctx.Done(): + return 0, ContextErr(ctx.Err()) + default: + } + return 0, io.EOF case <-closing: return 0, ErrConnClosing case i := <-proceed: return i, nil } } + +const ( + spaceByte = ' ' + tildaByte = '~' + percentByte = '%' +) + +// grpcMessageEncode encodes the grpc-message field in the same +// manner as https://github.com/grpc/grpc-java/pull/1517. +func grpcMessageEncode(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c < tildaByte && c != percentByte) { + return grpcMessageEncodeUnchecked(msg) + } + } + return msg + } + +func grpcMessageEncodeUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c >= spaceByte && c < tildaByte && c != percentByte { + _ = buf.WriteByte(c) + } else { + _, _ = buf.WriteString(fmt.Sprintf("%%%02X", c)) + } + } + return buf.String() +} + +// grpcMessageDecode decodes the grpc-message field in the same +// manner as https://github.com/grpc/grpc-java/pull/1517.
+func grpcMessageDecode(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return grpcMessageDecodeUnchecked(msg) + } + } + return msg +} + +func grpcMessageDecodeUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseInt(msg[i+1:i+3], 16, 8) + if err != nil { + _ = buf.WriteByte(c) + } else { + _ = buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + _ = buf.WriteByte(c) + } + } + return buf.String() +} diff --git a/vendor/google.golang.org/grpc/transport/transport_test.go b/vendor/google.golang.org/grpc/transport/transport_test.go new file mode 100644 index 00000000000..6dd01d851ed --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/transport_test.go @@ -0,0 +1,797 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package transport + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math" + "net" + "reflect" + "strconv" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" +) + +type server struct { + lis net.Listener + port string + startedErr chan error // error (or nil) with server start value + mu sync.Mutex + conns map[ServerTransport]bool +} + +var ( + expectedRequest = []byte("ping") + expectedResponse = []byte("pong") + expectedRequestLarge = make([]byte, initialWindowSize*2) + expectedResponseLarge = make([]byte, initialWindowSize*2) +) + +type testStreamHandler struct { + t ServerTransport +} + +type hType int + +const ( + normal hType = iota + suspended + misbehaved + encodingRequiredStatus +) + +func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) { + req := expectedRequest + resp := expectedResponse + if s.Method() == "foo.Large" { + req = expectedRequestLarge + resp = expectedResponseLarge + } + p := make([]byte, len(req)) + _, err := io.ReadFull(s, p) + if err != nil { + return + } + if !bytes.Equal(p, req) { + t.Fatalf("handleStream got %v, want %v", p, req) + } + // send a response back to the client. + h.t.Write(s, resp, &Options{}) + // send the trailer to end the stream. + h.t.WriteStatus(s, codes.OK, "") +} + +// handleStreamSuspension starts a goroutine that waits until s.ctx is canceled, leaving the stream suspended. +func (h *testStreamHandler) handleStreamSuspension(s *Stream) { + go func() { + <-s.ctx.Done() + }() +} + +func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) { + conn, ok := s.ServerTransport().(*http2Server) + if !ok { + t.Fatalf("Failed to convert %v to *http2Server", s.ServerTransport()) + } + size := 1 + if s.Method() == "foo.MaxFrame" { + size = http2MaxFrameLen + } + // Drain the client side stream flow control window. + var sent int + for sent <= initialWindowSize { + <-conn.writableChan + if err := conn.framer.writeData(true, s.id, false, make([]byte, size)); err != nil { + conn.writableChan <- 0 + break + } + conn.writableChan <- 0 + sent += size + } +} + +func (h *testStreamHandler) handleStreamEncodingRequiredStatus(t *testing.T, s *Stream) { + // raw newline is not accepted by http2 framer so it must be encoded. + h.t.WriteStatus(s, codes.Internal, "\n") +} + +// start starts the server. Other goroutines should block on s.startedErr for further operations.
+func (s *server) start(t *testing.T, port int, maxStreams uint32, ht hType) { + var err error + if port == 0 { + s.lis, err = net.Listen("tcp", "localhost:0") + } else { + s.lis, err = net.Listen("tcp", "localhost:"+strconv.Itoa(port)) + } + if err != nil { + s.startedErr <- fmt.Errorf("failed to listen: %v", err) + return + } + _, p, err := net.SplitHostPort(s.lis.Addr().String()) + if err != nil { + s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err) + return + } + s.port = p + s.conns = make(map[ServerTransport]bool) + s.startedErr <- nil + for { + conn, err := s.lis.Accept() + if err != nil { + return + } + transport, err := NewServerTransport("http2", conn, maxStreams, nil) + if err != nil { + return + } + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + transport.Close() + return + } + s.conns[transport] = true + s.mu.Unlock() + h := &testStreamHandler{transport} + switch ht { + case suspended: + go transport.HandleStreams(h.handleStreamSuspension) + case misbehaved: + go transport.HandleStreams(func(s *Stream) { + go h.handleStreamMisbehave(t, s) + }) + case encodingRequiredStatus: + go transport.HandleStreams(func(s *Stream) { + go h.handleStreamEncodingRequiredStatus(t, s) + }) + default: + go transport.HandleStreams(func(s *Stream) { + go h.handleStream(t, s) + }) + } + } +} + +func (s *server) wait(t *testing.T, timeout time.Duration) { + select { + case err := <-s.startedErr: + if err != nil { + t.Fatal(err) + } + case <-time.After(timeout): + t.Fatalf("Timed out after %v waiting for server to be ready", timeout) + } +} + +func (s *server) stop() { + s.lis.Close() + s.mu.Lock() + for c := range s.conns { + c.Close() + } + s.conns = nil + s.mu.Unlock() +} + +func setUp(t *testing.T, port int, maxStreams uint32, ht hType) (*server, ClientTransport) { + server := &server{startedErr: make(chan error, 1)} + go server.start(t, port, maxStreams, ht) + server.wait(t, 2*time.Second) + addr := "localhost:" + server.port + var ( + ct ClientTransport + connErr error + ) + ct, connErr = NewClientTransport(addr, &ConnectOptions{}) + if connErr != nil { + t.Fatalf("failed to create transport: %v", connErr) + } + return server, ct +} + +func TestClientSendAndReceive(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, normal) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Small", + } + s1, err1 := ct.NewStream(context.Background(), callHdr) + if err1 != nil { + t.Fatalf("failed to open stream: %v", err1) + } + if s1.id != 1 { + t.Fatalf("wrong stream id: %d", s1.id) + } + s2, err2 := ct.NewStream(context.Background(), callHdr) + if err2 != nil { + t.Fatalf("failed to open stream: %v", err2) + } + if s2.id != 3 { + t.Fatalf("wrong stream id: %d", s2.id) + } + opts := Options{ + Last: true, + Delay: false, + } + if err := ct.Write(s1, expectedRequest, &opts); err != nil { + t.Fatalf("failed to send data: %v", err) + } + p := make([]byte, len(expectedResponse)) + _, recvErr := io.ReadFull(s1, p) + if recvErr != nil || !bytes.Equal(p, expectedResponse) { + t.Fatalf("Error: %v, want ; Result: %v, want %v", recvErr, p, expectedResponse) + } + _, recvErr = io.ReadFull(s1, p) + if recvErr != io.EOF { + t.Fatalf("Error: %v; want ", recvErr) + } + ct.Close() + server.stop() +} + +func TestClientErrorNotify(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, normal) + go server.stop() + // ct.reader should detect the error and activate ct.Error(). 
+ <-ct.Error() + ct.Close() +} + +func performOneRPC(ct ClientTransport) { + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Small", + } + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + return + } + opts := Options{ + Last: true, + Delay: false, + } + if err := ct.Write(s, expectedRequest, &opts); err == nil { + time.Sleep(5 * time.Millisecond) + // The following s.Recv()'s could error out because the + // underlying transport is gone. + // + // Read response + p := make([]byte, len(expectedResponse)) + io.ReadFull(s, p) + // Read io.EOF + io.ReadFull(s, p) + } +} + +func TestClientMix(t *testing.T) { + s, ct := setUp(t, 0, math.MaxUint32, normal) + go func(s *server) { + time.Sleep(5 * time.Second) + s.stop() + }(s) + go func(ct ClientTransport) { + <-ct.Error() + ct.Close() + }(ct) + for i := 0; i < 1000; i++ { + time.Sleep(10 * time.Millisecond) + go performOneRPC(ct) + } +} + +func TestLargeMessage(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, normal) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Large", + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Errorf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) + } + if err := ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil { + t.Errorf("%v.Write(_, _, _) = %v, want ", ct, err) + } + p := make([]byte, len(expectedResponseLarge)) + if _, err := io.ReadFull(s, p); err != nil || !bytes.Equal(p, expectedResponseLarge) { + t.Errorf("io.ReadFull(_, %v) = _, %v, want %v, ", err, p, expectedResponse) + } + if _, err = io.ReadFull(s, p); err != io.EOF { + t.Errorf("Failed to complete the stream %v; want ", err) + } + }() + } + wg.Wait() + ct.Close() + server.stop() +} + +func TestGracefulClose(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, normal) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Small", + } + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Fatalf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) + } + if err = ct.GracefulClose(); err != nil { + t.Fatalf("%v.GracefulClose() = %v, want ", ct, err) + } + var wg sync.WaitGroup + // Expect the failure for all the follow-up streams because ct has been closed gracefully. + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + if _, err := ct.NewStream(context.Background(), callHdr); err != ErrConnClosing { + t.Errorf("%v.NewStream(_, _) = _, %v, want _, %v", ct, err, ErrConnClosing) + } + }() + } + opts := Options{ + Last: true, + Delay: false, + } + // The stream which was created before graceful close can still proceed. + if err := ct.Write(s, expectedRequest, &opts); err != nil { + t.Fatalf("%v.Write(_, _, _) = %v, want ", ct, err) + } + p := make([]byte, len(expectedResponse)) + if _, err := io.ReadFull(s, p); err != nil || !bytes.Equal(p, expectedResponse) { + t.Fatalf("io.ReadFull(_, %v) = _, %v, want %v, ", err, p, expectedResponse) + } + if _, err = io.ReadFull(s, p); err != io.EOF { + t.Fatalf("Failed to complete the stream %v; want ", err) + } + wg.Wait() + ct.Close() + server.stop() +} + +func TestLargeMessageSuspension(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, suspended) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Large", + } + // Set a long enough timeout for writing a large message out. 
+ ctx, _ := context.WithTimeout(context.Background(), time.Second) + s, err := ct.NewStream(ctx, callHdr) + if err != nil { + t.Fatalf("failed to open stream: %v", err) + } + // Write should not be done successfully due to flow control. + err = ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false}) + expectedErr := StreamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded) + if err == nil || err != expectedErr { + t.Fatalf("Write got %v, want %v", err, expectedErr) + } + ct.Close() + server.stop() +} + +func TestMaxStreams(t *testing.T) { + server, ct := setUp(t, 0, 1, suspended) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Large", + } + // Have a pending stream which takes all streams quota. + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + cc, ok := ct.(*http2Client) + if !ok { + t.Fatalf("Failed to convert %v to *http2Client", ct) + } + done := make(chan struct{}) + ch := make(chan int) + ready := make(chan struct{}) + go func() { + for { + select { + case <-time.After(5 * time.Millisecond): + select { + case ch <- 0: + case <-ready: + return + } + case <-time.After(5 * time.Second): + close(done) + return + case <-ready: + return + } + } + }() + for { + select { + case <-ch: + case <-done: + t.Fatalf("Client has not received the max stream setting in 5 seconds.") + } + cc.mu.Lock() + // cc.streamsQuota should be initialized once receiving the 1st setting frame from + // the server. + if cc.streamsQuota != nil { + cc.mu.Unlock() + select { + case <-cc.streamsQuota.acquire(): + t.Fatalf("streamsQuota.acquire() becomes readable mistakenly.") + default: + if cc.streamsQuota.quota != 0 { + t.Fatalf("streamsQuota.quota got non-zero quota mistakenly.") + } + } + break + } + cc.mu.Unlock() + } + close(ready) + // Close the pending stream so that the streams quota becomes available for the next new stream. + ct.CloseStream(s, nil) + select { + case i := <-cc.streamsQuota.acquire(): + if i != 1 { + t.Fatalf("streamsQuota.acquire() got %d quota, want 1.", i) + } + cc.streamsQuota.add(i) + default: + t.Fatalf("streamsQuota.acquire() is not readable.") + } + if _, err := ct.NewStream(context.Background(), callHdr); err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + ct.Close() + server.stop() +} + +func TestServerContextCanceledOnClosedConnection(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, suspended) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo", + } + var sc *http2Server + // Wait until the server transport is setup. + for { + server.mu.Lock() + if len(server.conns) == 0 { + server.mu.Unlock() + time.Sleep(time.Millisecond) + continue + } + for k := range server.conns { + var ok bool + sc, ok = k.(*http2Server) + if !ok { + t.Fatalf("Failed to convert %v to *http2Server", k) + } + } + server.mu.Unlock() + break + } + cc, ok := ct.(*http2Client) + if !ok { + t.Fatalf("Failed to convert %v to *http2Client", ct) + } + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + // Make sure the headers frame is flushed out. + <-cc.writableChan + if err = cc.framer.writeData(true, s.id, false, make([]byte, http2MaxFrameLen)); err != nil { + t.Fatalf("Failed to write data: %v", err) + } + cc.writableChan <- 0 + // Loop until the server side stream is created. 
+ var ss *Stream + for { + time.Sleep(time.Second) + sc.mu.Lock() + if len(sc.activeStreams) == 0 { + sc.mu.Unlock() + continue + } + ss = sc.activeStreams[s.id] + sc.mu.Unlock() + break + } + cc.Close() + select { + case <-ss.Context().Done(): + if ss.Context().Err() != context.Canceled { + t.Fatalf("ss.Context().Err() got %v, want %v", ss.Context().Err(), context.Canceled) + } + case <-time.After(5 * time.Second): + t.Fatalf("Failed to cancel the context of the server side stream.") + } +} + +func TestServerWithMisbehavedClient(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, suspended) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo", + } + var sc *http2Server + // Wait until the server transport is setup. + for { + server.mu.Lock() + if len(server.conns) == 0 { + server.mu.Unlock() + time.Sleep(time.Millisecond) + continue + } + for k := range server.conns { + var ok bool + sc, ok = k.(*http2Server) + if !ok { + t.Fatalf("Failed to convert %v to *http2Server", k) + } + } + server.mu.Unlock() + break + } + cc, ok := ct.(*http2Client) + if !ok { + t.Fatalf("Failed to convert %v to *http2Client", ct) + } + // Test server behavior for violation of stream flow control window size restriction. + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + var sent int + // Drain the stream flow control window + <-cc.writableChan + if err = cc.framer.writeData(true, s.id, false, make([]byte, http2MaxFrameLen)); err != nil { + t.Fatalf("Failed to write data: %v", err) + } + cc.writableChan <- 0 + sent += http2MaxFrameLen + // Wait until the server creates the corresponding stream and receives some data. + var ss *Stream + for { + time.Sleep(time.Millisecond) + sc.mu.Lock() + if len(sc.activeStreams) == 0 { + sc.mu.Unlock() + continue + } + ss = sc.activeStreams[s.id] + sc.mu.Unlock() + ss.fc.mu.Lock() + if ss.fc.pendingData > 0 { + ss.fc.mu.Unlock() + break + } + ss.fc.mu.Unlock() + } + if ss.fc.pendingData != http2MaxFrameLen || ss.fc.pendingUpdate != 0 || sc.fc.pendingData != http2MaxFrameLen || sc.fc.pendingUpdate != 0 { + t.Fatalf("Server mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d", ss.fc.pendingData, ss.fc.pendingUpdate, sc.fc.pendingData, sc.fc.pendingUpdate, http2MaxFrameLen, 0, http2MaxFrameLen, 0) + } + // Keep sending until the server inbound window is drained for that stream. + for sent <= initialWindowSize { + <-cc.writableChan + if err = cc.framer.writeData(true, s.id, false, make([]byte, 1)); err != nil { + t.Fatalf("Failed to write data: %v", err) + } + cc.writableChan <- 0 + sent++ + } + // Server sent a resetStream for s already. + code := http2ErrConvTab[http2.ErrCodeFlowControl] + if _, err := io.ReadFull(s, make([]byte, 1)); err != io.EOF || s.statusCode != code { + t.Fatalf("%v got err %v with statusCode %d, want err with statusCode %d", s, err, s.statusCode, code) + } + + if ss.fc.pendingData != 0 || ss.fc.pendingUpdate != 0 || sc.fc.pendingData != 0 || sc.fc.pendingUpdate <= initialWindowSize { + t.Fatalf("Server mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, >%d", ss.fc.pendingData, ss.fc.pendingUpdate, sc.fc.pendingData, sc.fc.pendingUpdate, initialWindowSize) + } + ct.CloseStream(s, nil) + // Test server behavior for violation of connection flow control window size restriction.
+ // + // Keep creating new streams until the connection window is drained on the server and + // the server tears down the connection. + for { + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + // The server tears down the connection. + break + } + <-cc.writableChan + cc.framer.writeData(true, s.id, true, make([]byte, http2MaxFrameLen)) + cc.writableChan <- 0 + } + ct.Close() + server.stop() +} + +func TestClientWithMisbehavedServer(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, misbehaved) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo", + } + conn, ok := ct.(*http2Client) + if !ok { + t.Fatalf("Failed to convert %v to *http2Client", ct) + } + // Test the logic for the violation of stream flow control window size restriction. + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil { + t.Fatalf("Failed to write: %v", err) + } + // Read without window update. + for { + p := make([]byte, http2MaxFrameLen) + if _, err = s.dec.Read(p); err != nil { + break + } + } + if s.fc.pendingData <= initialWindowSize || s.fc.pendingUpdate != 0 || conn.fc.pendingData <= initialWindowSize || conn.fc.pendingUpdate != 0 { + t.Fatalf("Client mistakenly updates inbound flow control params: got %d, %d, %d, %d; want >%d, %d, >%d, %d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize, 0, initialWindowSize, 0) + } + if err != io.EOF || s.statusCode != codes.Internal { + t.Fatalf("Got err %v and the status code %d, want and the code %d", err, s.statusCode, codes.Internal) + } + conn.CloseStream(s, err) + if s.fc.pendingData != 0 || s.fc.pendingUpdate != 0 || conn.fc.pendingData != 0 || conn.fc.pendingUpdate <= initialWindowSize { + t.Fatalf("Client mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, >%d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize) + } + // Test the logic for the violation of the connection flow control window size restriction. + // + // Generate enough streams to drain the connection window. + callHdr = &CallHdr{ + Host: "localhost", + Method: "foo.MaxFrame", + } + // Make the server flood the traffic to violate flow control window size of the connection. + for { + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + break + } + if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil { + break + } + } + // http2Client.errChan is closed due to connection flow control window size violation. 
+ <-conn.Error() + ct.Close() + server.stop() +} + +func TestEncodingRequiredStatus(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, encodingRequiredStatus) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo", + } + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + return + } + opts := Options{ + Last: true, + Delay: false, + } + if err := ct.Write(s, expectedRequest, &opts); err != nil { + t.Fatalf("Failed to write the request: %v", err) + } + if _, err = ioutil.ReadAll(s); err != nil { + t.Fatal(err) + } + ct.Close() + server.stop() +} + +func TestStreamContext(t *testing.T) { + expectedStream := Stream{} + ctx := newContextWithStream(context.Background(), &expectedStream) + s, ok := StreamFromContext(ctx) + if !ok || !reflect.DeepEqual(expectedStream, *s) { + t.Fatalf("GetStreamFromContext(%v) = %v, %t, want: %v, true", ctx, *s, ok, expectedStream) + } +} + +func TestIsReservedHeader(t *testing.T) { + tests := []struct { + h string + want bool + }{ + {"", false}, // but should be rejected earlier + {"foo", false}, + {"content-type", true}, + {"grpc-message-type", true}, + {"grpc-encoding", true}, + {"grpc-message", true}, + {"grpc-status", true}, + {"grpc-timeout", true}, + {"te", true}, + } + for _, tt := range tests { + got := isReservedHeader(tt.h) + if got != tt.want { + t.Errorf("isReservedHeader(%q) = %v; want %v", tt.h, got, tt.want) + } + } +} + +func TestGrpcMessageEncode(t *testing.T) { + testGrpcMessageEncode(t, "my favorite character is \u0000", "my favorite character is %00") + testGrpcMessageEncode(t, "my favorite character is %", "my favorite character is %25") +} + +func TestGrpcMessageDecode(t *testing.T) { + testGrpcMessageDecode(t, "Hello", "Hello") + testGrpcMessageDecode(t, "H%61o", "Hao") + testGrpcMessageDecode(t, "H%6", "H%6") + testGrpcMessageDecode(t, "%G0", "%G0") +} + +func testGrpcMessageEncode(t *testing.T, input string, expected string) { + actual := grpcMessageEncode(input) + if expected != actual { + t.Errorf("Expected %s from grpcMessageEncode, got %s", expected, actual) + } +} + +func testGrpcMessageDecode(t *testing.T, input string, expected string) { + actual := grpcMessageDecode(input) + if expected != actual { + t.Errorf("Expected %s from grpcMessageDecode, got %s", expected, actual) + } +}
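Note on the grpc-message handling introduced in transport.go above: bytes in the printable ASCII range (space through '}') other than '%' pass through unchanged, every other byte is escaped as %XX, and malformed escapes survive decoding verbatim, as exercised by TestGrpcMessageEncode and TestGrpcMessageDecode. The following is a minimal standalone sketch of that scheme for reference only; it is not part of the patch, and the helper names encodeMsg and decodeMsg are illustrative, whereas the vendored package keeps its unexported grpcMessageEncode and grpcMessageDecode.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// encodeMsg mirrors the grpc-message encoding: printable ASCII except '%'
// is copied as-is, everything else becomes a %XX escape.
func encodeMsg(msg string) string {
	var b strings.Builder
	for i := 0; i < len(msg); i++ {
		c := msg[i]
		if c >= ' ' && c < '~' && c != '%' {
			b.WriteByte(c)
		} else {
			fmt.Fprintf(&b, "%%%02X", c)
		}
	}
	return b.String()
}

// decodeMsg mirrors the decoding: well-formed %XX escapes are expanded,
// malformed ones are copied through unchanged.
func decodeMsg(msg string) string {
	var b strings.Builder
	for i := 0; i < len(msg); i++ {
		c := msg[i]
		if c == '%' && i+2 < len(msg) {
			if v, err := strconv.ParseInt(msg[i+1:i+3], 16, 8); err == nil {
				b.WriteByte(byte(v))
				i += 2
				continue
			}
		}
		b.WriteByte(c)
	}
	return b.String()
}

func main() {
	fmt.Println(encodeMsg("my favorite character is \u0000")) // my favorite character is %00
	fmt.Println(encodeMsg("my favorite character is %"))      // my favorite character is %25
	fmt.Println(decodeMsg("H%61o"))                           // Hao
	fmt.Println(decodeMsg("H%6"))                             // H%6 (malformed escape kept as-is)
}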